diff --git a/.cargo/config.toml b/.cargo/config.toml index e29275804bd85..4796a2c26965c 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -29,5 +29,5 @@ rustflags = [ "-Aclippy::needless_option_as_deref", # false positives "-Aclippy::derivable_impls", # false positives "-Aclippy::stable_sort_primitive", # prefer stable sort - "-Aclippy::extra-unused-type-parameters" # stylistic + "-Aclippy::extra-unused-type-parameters", # stylistic ] diff --git a/.github/workflows/burnin-label-notification.yml b/.github/workflows/burnin-label-notification.yml index 22f15c0ec35ee..f45455d31db1e 100644 --- a/.github/workflows/burnin-label-notification.yml +++ b/.github/workflows/burnin-label-notification.yml @@ -6,12 +6,19 @@ on: jobs: notify-devops: runs-on: ubuntu-latest + strategy: + matrix: + channel: + - name: 'Team: DevOps' + room: '!lUslSijLMgNcEKcAiE:parity.io' + steps: - name: Notify devops - if: github.event.label.name == 'A1-needsburnin' - uses: s3krit/matrix-message-action@v0.0.3 + if: startsWith(github.event.label.name, 'A1-') + uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3 with: - room_id: ${{ secrets.POLKADOT_DEVOPS_MATRIX_ROOM_ID }} - access_token: ${{ secrets.POLKADOT_DEVOPS_MATRIX_ACCESS_TOKEN }} - message: "@room Burn-in request received for [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})" - server: "matrix.parity.io" + room_id: ${{ matrix.channel.room }} + access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} + server: "m.parity.io" + message: | + @room Burn-in request received for [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }}) diff --git a/.github/workflows/monthly-tag.yml b/.github/workflows/monthly-tag.yml index 6bec03d27e7be..055207d85a4dd 100644 --- a/.github/workflows/monthly-tag.yml +++ b/.github/workflows/monthly-tag.yml @@ -13,8 +13,8 @@ jobs: - name: Get the tags by date id: tags run: | - echo "::set-output 
name=new::$(date +'monthly-%Y-%m')" - echo "::set-output name=old::$(date -d'1 month ago' +'monthly-%Y-%m')" + echo "new=$(date +'monthly-%Y-%m')" >> $GITHUB_OUTPUT + echo "old=$(date -d'1 month ago' +'monthly-%Y-%m')" >> $GITHUB_OUTPUT - name: Checkout branch "master" uses: actions/checkout@v3 with: diff --git a/.github/workflows/release-bot.yml b/.github/workflows/release-bot.yml index ed0a8e5435b9c..05bea32abc697 100644 --- a/.github/workflows/release-bot.yml +++ b/.github/workflows/release-bot.yml @@ -8,11 +8,24 @@ on: jobs: ping_matrix: runs-on: ubuntu-latest + strategy: + matrix: + channel: + - name: 'General: Rust, Polkadot, Substrate' + room: '!aJymqQYtCjjqImFLSb:parity.io' + pre-release: false + steps: - name: send message - uses: s3krit/matrix-message-action@v0.0.3 + uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3 with: - room_id: ${{ secrets.MATRIX_ROOM_ID }} - access_token: ${{ secrets.MATRIX_ACCESS_TOKEN }} - message: "**${{github.event.repository.full_name}}:** A release has been ${{github.event.action}}
Release version [${{github.event.release.tag_name}}](${{github.event.release.html_url}})

***Description:***
${{github.event.release.body}}
" - server: "matrix.parity.io" + room_id: ${{ matrix.channel.room }} + access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} + server: "m.parity.io" + message: | + ***${{github.event.repository.full_name}}:*** A release has been ${{github.event.action}}
+ Release version [${{github.event.release.tag_name}}](${{github.event.release.html_url}}) + + ----- + + ${{github.event.release.body}}
diff --git a/.github/workflows/release-tagging.yml b/.github/workflows/release-tagging.yml index f7fa913c69709..1862582f40eba 100644 --- a/.github/workflows/release-tagging.yml +++ b/.github/workflows/release-tagging.yml @@ -14,7 +14,7 @@ jobs: - name: Set Git tag uses: s3krit/walking-tag-action@d04f7a53b72ceda4e20283736ce3627011275178 # latest version from master with: - TAG_NAME: release - TAG_MESSAGE: Latest release + tag-name: release + tag-message: Latest release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index fe70d3532a9b1..f00836528973e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -48,7 +48,7 @@ variables: CARGO_INCREMENTAL: 0 DOCKER_OS: "debian:bullseye" ARCH: "x86_64" - CI_IMAGE: "paritytech/ci-linux:production" + CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] BUILDAH_IMAGE: "quay.io/buildah/stable:v1.29" BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" @@ -61,7 +61,7 @@ variables: NEXTEST_FAILURE_OUTPUT: immediate-final NEXTEST_SUCCESS_OUTPUT: final - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.52" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.55" default: retry: @@ -301,8 +301,12 @@ include: # completion, because the publishing jobs depends on them AS INTENDED: crates should not be # published before their source code is checked. - project: parity/infrastructure/ci_cd/shared - ref: v0.2 + ref: main file: /common/timestamp.yml + - project: parity/infrastructure/ci_cd/shared + ref: main + file: /common/ci-unified.yml + #### stage: notify diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index 38bb4de26362f..ecd384a514563 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -1,11 +1,11 @@ {{header}} -//! Autogenerated weights for {{pallet}} +//! Autogenerated weights for `{{pallet}}` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} //! DATE: {{date}}, STEPS: `{{cmd.steps}}`, REPEAT: `{{cmd.repeat}}`, LOW RANGE: `{{cmd.lowest_range_values}}`, HIGH RANGE: `{{cmd.highest_range_values}}` //! WORST CASE MAP SIZE: `{{cmd.worst_case_map_values}}` //! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}` -//! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}}, CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} +//! WASM-EXECUTION: `{{cmd.wasm_execution}}`, CHAIN: `{{cmd.chain}}`, DB CACHE: `{{cmd.db_cache}}` // Executed Command: {{#each args as |arg|}} @@ -20,7 +20,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for {{pallet}}. +/// Weight functions needed for `{{pallet}}`. pub trait WeightInfo { {{#each benchmarks as |benchmark|}} fn {{benchmark.name~}} @@ -31,7 +31,7 @@ pub trait WeightInfo { {{/each}} } -/// Weights for {{pallet}} using the Substrate node and recommended hardware. +/// Weights for `{{pallet}}` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); {{#if (eq pallet "frame_system")}} impl WeightInfo for SubstrateWeight { @@ -78,7 +78,7 @@ impl WeightInfo for SubstrateWeight { {{/each}} } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { {{#each benchmarks as |benchmark|}} {{#each benchmark.comments as |comment|}} diff --git a/Cargo.lock b/Cargo.lock index b29ed95b87766..6cca0a018ca72 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,22 +12,13 @@ dependencies = [ "regex", ] -[[package]] -name = "addr2line" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" -dependencies = [ - "gimli 0.26.2", -] - [[package]] name = "addr2line" version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ - "gimli 0.27.2", + "gimli", ] [[package]] @@ -115,9 +106,9 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c" +checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" dependencies = [ "aead 0.5.2", "aes 0.8.2", @@ -181,13 +172,19 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -276,6 +273,20 @@ dependencies = [ "num-traits", ] +[[package]] +name = "aquamarine" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1" +dependencies = [ + "include_dir", + "itertools", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "arbitrary" version = "1.3.0" @@ -288,6 +299,26 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +[[package]] +name = "ark-algebra-test-templates" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "400bd3a79c741b1832f1416d4373ae077ef82ca14a8b4cee1248a2f11c8b9172" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", + "hex", + "num-bigint", + "num-integer", + "num-traits", + "serde", + "serde_derive", + "serde_json", + "sha2 0.10.7", +] + [[package]] name = "ark-bls12-377" version = "0.4.0" @@ -311,6 +342,18 @@ dependencies = [ "ark-std", ] +[[package]] +name = "ark-bw6-761" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e0605daf0cc5aa2034b78d008aaf159f56901d92a52ee4f6ecdfdac4f426700" +dependencies = [ + "ark-bls12-377", + "ark-ec", + "ark-ff", + "ark-std", +] + [[package]] name = "ark-ec" version = "0.4.2" @@ -328,6 +371,30 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ark-ed-on-bls12-377" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b10d901b9ac4b38f9c32beacedfadcdd64e46f8d7f8e88c1ae1060022cf6f6c6" +dependencies = [ + "ark-bls12-377", + "ark-ec", + "ark-ff", + "ark-std", +] + +[[package]] +name = "ark-ed-on-bls12-381-bandersnatch" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9cde0f2aa063a2a5c28d39b47761aa102bda7c13c84fc118a61b87c7b2f785c" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-std", +] + [[package]] name = "ark-ff" version = "0.4.2" @@ -339,7 +406,7 @@ dependencies = [ "ark-serialize", 
"ark-std", "derivative", - "digest 0.10.6", + "digest 0.10.7", "itertools", "num-bigint", "num-traits", @@ -384,6 +451,62 @@ dependencies = [ "hashbrown 0.13.2", ] +[[package]] +name = "ark-r1cs-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de1d1472e5cb020cb3405ce2567c91c8d43f21b674aef37b0202f5c3304761db" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-relations", + "ark-std", + "derivative", + "num-bigint", + "num-integer", + "num-traits", + "tracing", +] + +[[package]] +name = "ark-relations" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00796b6efc05a3f48225e59cb6a2cda78881e7c390872d5786aaf112f31fb4f0" +dependencies = [ + "ark-ff", + "ark-std", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "ark-scale" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d028cd1025d002fa88c10cd644d29028a7b40806579b608c6ba843b937bbb23" +dependencies = [ + "ark-ec", + "ark-serialize", + "ark-std", + "parity-scale-codec", +] + +[[package]] +name = "ark-secret-scalar" +version = "0.0.2" +source = "git+https://github.com/w3f/ring-vrf?rev=c86ebd4#c86ebd4114d3165d05f9ce28c1d9e8d7a9a4e801" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", + "ark-transcript", + "digest 0.10.7", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "ark-serialize" version = "0.4.2" @@ -392,7 +515,7 @@ checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" dependencies = [ "ark-serialize-derive", "ark-std", - "digest 0.10.6", + "digest 0.10.7", "num-bigint", ] @@ -418,10 +541,17 @@ dependencies = [ ] [[package]] -name = "array-bytes" -version = "4.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52f63c5c1316a16a4b35eaac8b76a98248961a533f061684cb2a7cb0eafb6c6" +name = "ark-transcript" +version = "0.0.2" +source = 
"git+https://github.com/w3f/ring-vrf?rev=c86ebd4#c86ebd4114d3165d05f9ce28c1d9e8d7a9a4e801" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "digest 0.10.7", + "rand_core 0.6.4", + "sha3", +] [[package]] name = "array-bytes" @@ -583,7 +713,7 @@ checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -605,7 +735,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -616,7 +746,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -661,15 +791,36 @@ version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ - "addr2line 0.19.0", + "addr2line", "cc", "cfg-if", "libc", "miniz_oxide 0.6.2", - "object 0.30.3", + "object", "rustc-demangle", ] +[[package]] +name = "bandersnatch_vrfs" +version = "0.0.1" +source = "git+https://github.com/w3f/ring-vrf?rev=c86ebd4#c86ebd4114d3165d05f9ce28c1d9e8d7a9a4e801" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ed-on-bls12-381-bandersnatch", + "ark-ff", + "ark-serialize", + "ark-std", + "dleq_vrf", + "fflonk", + "merlin 3.0.0", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "ring 0.1.0", + "sha2 0.10.7", + "zeroize", +] + [[package]] name = "base-x" version = "0.2.11" @@ -696,9 +847,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" 
[[package]] name = "base64ct" @@ -728,7 +879,7 @@ dependencies = [ name = "binary-merkle-tree" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "env_logger 0.9.3", "hash-db", "log", @@ -757,13 +908,13 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "prettyplease 0.2.4", + "prettyplease 0.2.6", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -790,7 +941,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -885,9 +1036,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "bounded-collections" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3888522b497857eb606bf51695988dba7096941822c1bcf676e3a929a9ae7a0" +checksum = "eb5b05133427c07c4776906f673ccf36c21b102c9829c641a5b56bd151d44fd6" dependencies = [ "log", "parity-scale-codec", @@ -903,9 +1054,9 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bstr" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d4260bcc2e8fc9df1eac4919a720effeb63a3f0952f5bf4944adfa18897f09" +checksum = "a246e68bb43f6cd9db24bea052a53e40405417c5fb372e3d1a8a7f770a564ef5" dependencies = [ "memchr", "once_cell", @@ -924,9 +1075,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.1" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b1ce199063694f33ffb7dd4e0ee620741495c32833cde5aa08f02a0bf96f0c8" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" @@ -1038,9 +1189,9 @@ dependencies = [ [[package]] name = 
"cfg-expr" -version = "0.15.1" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8790cf1286da485c72cf5fc7aeba308438800036ec67d89425924c4807268c9" +checksum = "e70d3ad08698a0568b0562f22710fe6bfc1f4a61a367c77d0398c562eadd453a" dependencies = [ "smallvec", ] @@ -1087,7 +1238,7 @@ name = "chain-spec-builder" version = "2.0.0" dependencies = [ "ansi_term", - "clap 4.2.7", + "clap 4.3.2", "node-cli", "rand 0.8.5", "sc-chain-spec", @@ -1098,13 +1249,13 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ + "android-tzdata", "iana-time-zone", "js-sys", - "num-integer", "num-traits", "time 0.1.45", "wasm-bindgen", @@ -1140,13 +1291,13 @@ dependencies = [ [[package]] name = "cid" -version = "0.8.6" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ed9c8b2d17acb8110c46f1da5bf4a696d745e1474a16db0cd2b49cd0249bf2" +checksum = "b9b68e3193982cd54187d71afdb2a271ad4cf8af157858e9cb911b91321de143" dependencies = [ "core2", "multibase", - "multihash 0.16.3", + "multihash", "serde", "unsigned-varint", ] @@ -1213,9 +1364,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.2.7" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d21f9bf1b425d2968943631ec91202fe5e837264063503708b83013f8fc938" +checksum = "401a4694d2bf92537b6867d94de48c4842089645fdcdf6c71865b175d836e9c2" dependencies = [ "clap_builder", "clap_derive", @@ -1224,36 +1375,36 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.2.7" +version = "4.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"914c8c79fb560f238ef6429439a30023c862f7a28e688c58f7203f12b29970bd" +checksum = "72394f3339a76daf211e57d4bcb374410f3965dcc606dd0e03738c7888766980" dependencies = [ "anstream", "anstyle", "bitflags", - "clap_lex 0.4.1", + "clap_lex 0.5.0", "strsim", ] [[package]] name = "clap_complete" -version = "4.2.1" +version = "4.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a19591b2ab0e3c04b588a0e04ddde7b9eaa423646d1b4a8092879216bf47473" +checksum = "7f6b5c519bab3ea61843a7923d074b04245624bb84a64a8c150f5deb014e388b" dependencies = [ - "clap 4.2.7", + "clap 4.3.2", ] [[package]] name = "clap_derive" -version = "4.2.0" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9644cd56d6b87dbe899ef8b053e331c0637664e9e21a33dfcdc36093f5c5c4" +checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -1267,9 +1418,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a2dd5a6fe8c6e3502f568a6353e5273bbb15193ad9a89e457b9970798efbea1" +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" [[package]] name = "codespan-reporting" @@ -1289,15 +1440,35 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "comfy-table" -version = "6.1.4" +version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e7b787b0dc42e8111badfdbe4c3059158ccb2db8780352fa1b01e8ccf45cc4d" +checksum = "f9e1f7e5d046697d34b593bdba8ee31f4649366e452a2ccabb3baf3511e503d1" dependencies = [ "strum", "strum_macros", "unicode-width", ] +[[package]] +name = "common" +version = "0.1.0" +source = "git+https://github.com/w3f/ring-proof#0e948f3c28cbacecdd3020403c4841c0eb339213" +dependencies = [ + "ark-ec", + "ark-ff", + 
"ark-poly", + "ark-serialize", + "ark-std", + "fflonk", + "merlin 3.0.0", +] + +[[package]] +name = "common-path" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2382f75942f4b3be3690fe4f86365e9c853c1587d6ee58212cebf6e2a9ccd101" + [[package]] name = "concurrent-queue" version = "2.2.0" @@ -1309,15 +1480,15 @@ dependencies = [ [[package]] name = "console" -version = "0.15.5" +version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60" +checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" dependencies = [ "encode_unicode", "lazy_static", "libc", "unicode-width", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -1326,6 +1497,28 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" +[[package]] +name = "const-random" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368a7a772ead6ce7e1de82bfb04c485f3db8ec744f72925af5735e29a22cc18e" +dependencies = [ + "const-random-macro", + "proc-macro-hack", +] + +[[package]] +name = "const-random-macro" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d7d6ab3c3a2282db210df5f02c4dab6e0a7057af0fb7ebd4070f30fe05c0ddb" +dependencies = [ + "getrandom 0.2.9", + "once_cell", + "proc-macro-hack", + "tiny-keccak", +] + [[package]] name = "constant_time_eq" version = "0.2.5" @@ -1383,28 +1576,27 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.93.2" +version = "0.95.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc42ba2e232e5b20ff7dc299a812d53337dadce9a7e39a238e6a5cb82d2e57b" +checksum = "1277fbfa94bc82c8ec4af2ded3e639d49ca5f7f3c7eeab2c66accd135ece4e70" dependencies = [ "cranelift-entity", 
] [[package]] name = "cranelift-codegen" -version = "0.93.2" +version = "0.95.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "253531aca9b6f56103c9420369db3263e784df39aa1c90685a1f69cfbba0623e" +checksum = "c6e8c31ad3b2270e9aeec38723888fe1b0ace3bea2b06b3f749ccf46661d3220" dependencies = [ - "arrayvec 0.7.2", "bumpalo", "cranelift-bforest", "cranelift-codegen-meta", "cranelift-codegen-shared", "cranelift-entity", "cranelift-isle", - "gimli 0.26.2", - "hashbrown 0.12.3", + "gimli", + "hashbrown 0.13.2", "log", "regalloc2", "smallvec", @@ -1413,33 +1605,33 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.93.2" +version = "0.95.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72f2154365e2bff1b1b8537a7181591fdff50d8e27fa6e40d5c69c3bad0ca7c8" +checksum = "c8ac5ac30d62b2d66f12651f6b606dbdfd9c2cfd0908de6b387560a277c5c9da" dependencies = [ "cranelift-codegen-shared", ] [[package]] name = "cranelift-codegen-shared" -version = "0.93.2" +version = "0.95.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "687e14e3f5775248930e0d5a84195abef8b829958e9794bf8d525104993612b4" +checksum = "dd82b8b376247834b59ed9bdc0ddeb50f517452827d4a11bccf5937b213748b8" [[package]] name = "cranelift-entity" -version = "0.93.2" +version = "0.95.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f42ea692c7b450ad18b8c9889661505d51c09ec4380cf1c2d278dbb2da22cae1" +checksum = "40099d38061b37e505e63f89bab52199037a72b931ad4868d9089ff7268660b0" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.93.2" +version = "0.95.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8483c2db6f45fe9ace984e5adc5d058102227e4c62e5aa2054e16b0275fd3a6e" +checksum = "64a25d9d0a0ae3079c463c34115ec59507b4707175454f0eee0891e83e30e82d" dependencies = [ "cranelift-codegen", "log", @@ -1449,15 +1641,15 @@ dependencies = [ 
[[package]] name = "cranelift-isle" -version = "0.93.2" +version = "0.95.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9793158837678902446c411741d87b43f57dadfb944f2440db4287cda8cbd59" +checksum = "80de6a7d0486e4acbd5f9f87ec49912bf4c8fb6aea00087b989685460d4469ba" [[package]] name = "cranelift-native" -version = "0.93.2" +version = "0.95.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72668c7755f2b880665cb422c8ad2d56db58a88b9bebfef0b73edc2277c13c49" +checksum = "bb6b03e0e03801c4b3fd8ce0758a94750c07a44e7944cc0ffbf0d3f2e7c79b00" dependencies = [ "cranelift-codegen", "libc", @@ -1466,9 +1658,9 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.93.2" +version = "0.95.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3852ce4b088b44ac4e29459573943009a70d1b192c8d77ef949b4e814f656fc1" +checksum = "ff3220489a3d928ad91e59dd7aeaa8b3de18afb554a6211213673a71c90737ac" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -1709,16 +1901,16 @@ dependencies = [ "cfg-if", "fiat-crypto", "packed_simd_2", - "platforms 3.0.2", + "platforms", "subtle", "zeroize", ] [[package]] name = "cxx" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" +checksum = "109308c20e8445959c2792e81871054c6a17e6976489a93d2769641a2ba5839c" dependencies = [ "cc", "cxxbridge-flags", @@ -1728,9 +1920,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" +checksum = "daf4c6755cdf10798b97510e0e2b3edb9573032bd9379de8fffa59d68165494f" dependencies = [ "cc", "codespan-reporting", @@ -1738,24 +1930,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.15", + "syn 2.0.18", 
] [[package]] name = "cxxbridge-flags" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" +checksum = "882074421238e84fe3b4c65d0081de34e5b323bf64555d3e61991f76eb64a7bb" [[package]] name = "cxxbridge-macro" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" +checksum = "4a076022ece33e7686fb76513518e219cca4fce5750a8ae6d1ce6c0f48fd1af9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -1795,15 +1987,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" +checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "data-encoding-macro" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86927b7cd2fe88fa698b87404b287ab98d1a0063a34071d92e575b72d3029aca" +checksum = "c904b33cc60130e1aeea4956ab803d08a3f4a0ca82d64ed757afac3891f2bb99" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1811,9 +2003,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" +checksum = "8fdf3fce3ce863539ec1d7fd1b6dcc3c645663376b43ed376bbf887733e4f772" dependencies = [ "data-encoding", "syn 1.0.109", @@ -1832,9 +2024,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"05e58dffcdcc8ee7b22f0c1f71a69243d7c2d9ad87b5a14361f2424a1565c219" +checksum = "56acb310e15652100da43d130af8d97b509e95af61aab1c5a7939ef24337ee17" dependencies = [ "const-oid", "zeroize", @@ -1964,9 +2156,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", "const-oid", @@ -2023,7 +2215,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -2032,12 +2224,55 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "210ec60ae7d710bed8683e333e9d2855a8a56a3e9892b38bad3bb0d4d29b0d5e" +[[package]] +name = "dleq_vrf" +version = "0.0.2" +source = "git+https://github.com/w3f/ring-vrf?rev=c86ebd4#c86ebd4114d3165d05f9ce28c1d9e8d7a9a4e801" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-secret-scalar", + "ark-serialize", + "ark-std", + "ark-transcript", + "arrayvec 0.7.2", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "doc-comment" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "docify" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "029de870d175d11969524d91a3fb2cbf6d488b853bff99d41cf65e533ac7d9d2" +dependencies = [ + "docify_macros", +] + +[[package]] +name = "docify_macros" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cac43324656a1b05eb0186deb51f27d2d891c704c37f34de281ef6297ba193e5" +dependencies = [ + "common-path", + "derive-syn-parse", + "once_cell", + "proc-macro2", + 
"quote", + "regex", + "syn 2.0.18", + "termcolor", + "toml 0.7.4", + "walkdir", +] + [[package]] name = "downcast" version = "0.11.0" @@ -2097,15 +2332,16 @@ dependencies = [ [[package]] name = "ecdsa" -version = "0.16.6" +version = "0.16.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a48e5d537b8a30c0b023116d981b16334be1485af7ca68db3a2b7024cbc957fd" +checksum = "0997c976637b606099b9985693efa3581e84e41f5c11ba5255f88711058ad428" dependencies = [ - "der 0.7.5", - "digest 0.10.6", - "elliptic-curve 0.13.4", + "der 0.7.6", + "digest 0.10.7", + "elliptic-curve 0.13.5", "rfc6979 0.4.0", "signature 2.1.0", + "spki 0.7.2", ] [[package]] @@ -2160,7 +2396,7 @@ dependencies = [ "base16ct 0.1.1", "crypto-bigint 0.4.9", "der 0.6.1", - "digest 0.10.6", + "digest 0.10.7", "ff 0.12.1", "generic-array 0.14.7", "group 0.12.1", @@ -2175,13 +2411,13 @@ dependencies = [ [[package]] name = "elliptic-curve" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75c71eaa367f2e5d556414a8eea812bc62985c879748d6403edabd9cb03f16e7" +checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" dependencies = [ "base16ct 0.2.0", "crypto-bigint 0.5.2", - "digest 0.10.6", + "digest 0.10.7", "ff 0.13.0", "generic-array 0.14.7", "group 0.13.0", @@ -2227,7 +2463,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -2300,15 +2536,15 @@ dependencies = [ [[package]] name = "expander" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f360349150728553f92e4c997a16af8915f418d3a0f21b440d34c5632f16ed84" +checksum = "5f86a749cf851891866c10515ef6c299b5c69661465e9c3bbe7e07a2b77fb0f7" dependencies = [ "blake2", "fs-err", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -2361,6 +2597,19 @@ 
dependencies = [ "subtle", ] +[[package]] +name = "fflonk" +version = "0.1.0" +source = "git+https://github.com/w3f/fflonk#26a5045b24e169cffc1f9328ca83d71061145c40" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "merlin 3.0.0", +] + [[package]] name = "fiat-crypto" version = "0.1.20" @@ -2459,9 +2708,9 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -2486,7 +2735,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" name = "frame-benchmarking" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "frame-support", "frame-support-procedural", "frame-system", @@ -2514,9 +2763,9 @@ name = "frame-benchmarking-cli" version = "4.0.0-dev" dependencies = [ "Inflector", - "array-bytes 4.2.0", + "array-bytes", "chrono", - "clap 4.2.7", + "clap 4.3.2", "comfy-table", "frame-benchmarking", "frame-support", @@ -2545,12 +2794,13 @@ dependencies = [ "sp-database", "sp-externalities", "sp-inherents", + "sp-io", "sp-keystore", "sp-runtime", "sp-state-machine", - "sp-std", "sp-storage", "sp-trie", + "sp-wasm-interface", "thiserror", "thousands", ] @@ -2581,7 +2831,7 @@ dependencies = [ "quote", "scale-info", "sp-arithmetic", - "syn 2.0.15", + "syn 2.0.18", "trybuild", ] @@ -2607,7 +2857,7 @@ dependencies = [ name = "frame-election-solution-type-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.2.7", + "clap 4.3.2", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-support", @@ -2624,7 +2874,7 @@ dependencies = [ name = "frame-executive" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "frame-support", 
"frame-system", "frame-try-runtime", @@ -2643,9 +2893,9 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "15.1.0" +version = "16.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "878babb0b136e731cc77ec2fd883ff02745ff21e6fb662729953d44923df009c" +checksum = "87cf1549fba25a6fcac22785b61698317d958e96cac72a59102ea45b9ae64692" dependencies = [ "cfg-if", "parity-scale-codec", @@ -2658,30 +2908,32 @@ name = "frame-remote-externalities" version = "0.10.0-dev" dependencies = [ "async-recursion", - "frame-support", "futures", "indicatif", "jsonrpsee", "log", - "pallet-elections-phragmen", "parity-scale-codec", "serde", "sp-core", "sp-io", "sp-runtime", + "sp-state-machine", + "sp-tracing", "spinners", "substrate-rpc-client", "tokio", "tokio-retry", - "tracing-subscriber 0.3.17", ] [[package]] name = "frame-support" version = "4.0.0-dev" dependencies = [ + "aquamarine", + "array-bytes", "assert_matches", "bitflags", + "docify", "environmental", "frame-metadata", "frame-support-procedural", @@ -2689,7 +2941,7 @@ dependencies = [ "impl-trait-for-tuples", "k256", "log", - "once_cell", + "macro_magic", "parity-scale-codec", "paste", "pretty_assertions", @@ -2702,6 +2954,7 @@ dependencies = [ "sp-core", "sp-core-hashing-proc-macro", "sp-debug-derive", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-runtime", @@ -2720,12 +2973,14 @@ dependencies = [ "Inflector", "cfg-expr", "derive-syn-parse", + "expander", "frame-support-procedural-tools", "itertools", + "macro_magic", "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -2736,7 +2991,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -2745,7 +3000,7 @@ version = "3.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -2770,6 +3025,7 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-version", + "static_assertions", "trybuild", 
] @@ -2794,6 +3050,8 @@ dependencies = [ "frame-system", "parity-scale-codec", "scale-info", + "serde", + "sp-runtime", ] [[package]] @@ -2871,9 +3129,9 @@ dependencies = [ [[package]] name = "fs4" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7f5b6908aecca5812a4569056285e58c666588c9573ee59765bf1d3692699e2" +checksum = "7672706608ecb74ab2e055c68327ffc25ae4cac1e12349204fd5fb0f3487cce2" dependencies = [ "rustix 0.37.19", "windows-sys 0.48.0", @@ -2963,7 +3221,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -3030,9 +3288,9 @@ dependencies = [ "frame-election-provider-support", "frame-support", "frame-system", - "git2", "num-format", "pallet-staking", + "sp-staking", ] [[package]] @@ -3083,8 +3341,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -3109,9 +3369,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.2" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" dependencies = [ "fallible-iterator", "indexmap", @@ -3119,29 +3379,8 @@ dependencies = [ ] [[package]] -name = "gimli" -version = "0.27.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" - -[[package]] -name = "git2" -version = "0.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf7f68c2995f392c49fffb4f95ae2c873297830eb25c6bc4c114ce8f4562acc" -dependencies = [ - 
"bitflags", - "libc", - "libgit2-sys", - "log", - "openssl-probe", - "openssl-sys", - "url", -] - -[[package]] -name = "glob" -version = "0.3.1" +name = "glob" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" @@ -3182,9 +3421,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ "bytes", "fnv", @@ -3207,9 +3446,9 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "handlebars" -version = "4.3.6" +version = "4.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "035ef95d03713f2c347a72547b7cd38cbc9af7cd51e6099fb62d586d4a6dee3a" +checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" dependencies = [ "log", "pest", @@ -3288,12 +3527,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -[[package]] -name = "hex-literal" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" - [[package]] name = "hkdf" version = "0.12.3" @@ -3329,7 +3562,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -3448,10 +3681,25 @@ dependencies = [ "rustls 0.20.8", "rustls-native-certs", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "webpki-roots", ] +[[package]] +name = "hyper-rustls" +version = "0.24.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" +dependencies = [ + "http", + "hyper", + "log", + "rustls 0.21.1", + "rustls-native-certs", + "tokio", + "tokio-rustls 0.24.0", +] + [[package]] name = "iana-time-zone" version = "0.1.56" @@ -3468,12 +3716,11 @@ dependencies = [ [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -3495,9 +3742,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -3541,6 +3788,17 @@ dependencies = [ "parity-scale-codec", ] +[[package]] +name = "impl-num-traits" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "951641f13f873bff03d4bf19ae8bec531935ac0ac2cc775f84d7edfdcfed3f17" +dependencies = [ + "integer-sqrt", + "num-traits", + "uint", +] + [[package]] name = "impl-serde" version = "0.4.0" @@ -3561,6 +3819,25 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "include_dir" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e" +dependencies = [ + "include_dir_macros", +] + +[[package]] +name = "include_dir_macros" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" +dependencies = [ + "proc-macro2", + "quote", +] + [[package]] name = "indexmap" version = "1.9.3" @@ -3580,13 +3857,14 @@ checksum = "8e04e2fd2b8188ea827b32ef11de88377086d690286ab35747ef7f9bf3ccb590" [[package]] name = "indicatif" -version = "0.17.3" +version = "0.17.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef509aa9bc73864d6756f0d34d35504af3cf0844373afe9b8669a5b8005a729" +checksum = "8ff8cc23a7393a397ed1d7f56e6365cba772aba9f9912ab968b03043c395d057" dependencies = [ "console", + "instant", "number_prefix", - "portable-atomic 0.3.20", + "portable-atomic", "unicode-width", ] @@ -3636,11 +3914,17 @@ dependencies = [ "webrtc-util", ] +[[package]] +name = "intx" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f38a50a899dc47a6d0ed5508e7f601a2e34c3a85303514b5d137f3c10a0c75" + [[package]] name = "io-lifetimes" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ "hermit-abi 0.3.1", "libc", @@ -3709,13 +3993,24 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.62" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68c16e1bfd491478ab155fd8b4896b86f9ede344949b641e61501e07c2b8b4d5" +checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json-patch" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f54898088ccb91df1b492cc80029a6fdf1c48ca0db7c6822a8babad69c94658" +dependencies = [ + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "jsonrpsee" version = "0.16.2" @@ -3746,7 +4041,7 @@ dependencies = 
[ "soketto", "thiserror", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "tokio-util", "tracing", "webpki-roots", @@ -3788,7 +4083,7 @@ checksum = "cc345b0a43c6bc49b947ebeb936e886a419ee3d894421790c969cc56040542ad" dependencies = [ "async-trait", "hyper", - "hyper-rustls", + "hyper-rustls 0.23.2", "jsonrpsee-core", "jsonrpsee-types", "rustc-hash", @@ -3867,10 +4162,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" dependencies = [ "cfg-if", - "ecdsa 0.16.6", - "elliptic-curve 0.13.4", + "ecdsa 0.16.7", + "elliptic-curve 0.13.5", "once_cell", - "sha2 0.10.6", + "sha2 0.10.7", ] [[package]] @@ -3909,6 +4204,8 @@ dependencies = [ "log", "node-primitives", "pallet-alliance", + "pallet-asset-conversion", + "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", @@ -3940,6 +4237,7 @@ dependencies = [ "pallet-message-queue", "pallet-mmr", "pallet-multisig", + "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", @@ -3977,6 +4275,7 @@ dependencies = [ "pallet-vesting", "pallet-whitelist", "parity-scale-codec", + "primitive-types", "scale-info", "sp-api", "sp-authority-discovery", @@ -4051,23 +4350,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.144" +version = "0.2.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" - -[[package]] -name = "libgit2-sys" -version = "0.14.2+1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f3d95f6b51075fe9810a7ae22c7095f12b98005ab364d8544797a825ce946a4" -dependencies = [ - "cc", - "libc", - "libssh2-sys", - "libz-sys", - "openssl-sys", - "pkg-config", -] +checksum = "fc86cde3ff845662b8f4ef6cb50ea0e20c524eb3d29ae048287e06a1b3fa6a81" [[package]] name = 
"libloading" @@ -4087,9 +4372,9 @@ checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" [[package]] name = "libm" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "libp2p" @@ -4163,7 +4448,7 @@ dependencies = [ "libp2p-identity", "log", "multiaddr", - "multihash 0.17.0", + "multihash", "multistream-select", "once_cell", "parking_lot 0.12.1", @@ -4205,7 +4490,7 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "log", - "lru 0.10.0", + "lru", "quick-protobuf", "quick-protobuf-codec", "smallvec", @@ -4223,10 +4508,10 @@ dependencies = [ "ed25519-dalek", "log", "multiaddr", - "multihash 0.17.0", + "multihash", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.6", + "sha2 0.10.7", "thiserror", "zeroize", ] @@ -4251,7 +4536,7 @@ dependencies = [ "log", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.6", + "sha2 0.10.7", "smallvec", "thiserror", "uint", @@ -4309,7 +4594,7 @@ dependencies = [ "once_cell", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.6", + "sha2 0.10.7", "snow", "static_assertions", "thiserror", @@ -4431,7 +4716,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "rcgen 0.10.0", - "ring", + "ring 0.16.20", "rustls 0.20.8", "thiserror", "webpki 0.22.0", @@ -4470,7 +4755,7 @@ dependencies = [ "libp2p-identity", "libp2p-noise", "log", - "multihash 0.17.0", + "multihash", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", @@ -4579,20 +4864,6 @@ dependencies = [ "libsecp256k1-core", ] -[[package]] -name = "libssh2-sys" -version = "0.2.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b094a36eb4b8b8c8a7b4b8ae43b2944502be3e59cd87687595cf6b0a71b3f4ca" -dependencies = [ - "cc", - "libc", - "libz-sys", - "openssl-sys", - "pkg-config", - "vcpkg", -] - [[package]] name = 
"libz-sys" version = "1.1.9" @@ -4600,7 +4871,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" dependencies = [ "cc", - "libc", "pkg-config", "vcpkg", ] @@ -4646,9 +4916,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece97ea872ece730aed82664c424eb4c8291e1ff2480247ccf7409044bc6479f" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "lite-json" @@ -4670,9 +4940,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", @@ -4680,21 +4950,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "lru" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" -dependencies = [ - "hashbrown 0.12.3", -] +checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" [[package]] name = "lru" @@ -4743,6 +5001,54 @@ dependencies = [ "libc", ] +[[package]] +name = "macro_magic" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aee866bfee30d2d7e83835a4574aad5b45adba4cc807f2a3bbba974e5d4383c9" +dependencies = [ + "macro_magic_core", + "macro_magic_macros", + "quote", + 
"syn 2.0.18", +] + +[[package]] +name = "macro_magic_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e766a20fd9c72bab3e1e64ed63f36bd08410e75803813df210d1ce297d7ad00" +dependencies = [ + "const-random", + "derive-syn-parse", + "macro_magic_core_macros", + "proc-macro2", + "quote", + "syn 2.0.18", +] + +[[package]] +name = "macro_magic_core_macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c12469fc165526520dff2807c2975310ab47cf7190a45b99b49a7dc8befab17b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.18", +] + +[[package]] +name = "macro_magic_macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" +dependencies = [ + "macro_magic_core", + "quote", + "syn 2.0.18", +] + [[package]] name = "maplit" version = "1.0.2" @@ -4764,15 +5070,6 @@ dependencies = [ "regex-automata", ] -[[package]] -name = "matchers" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" -dependencies = [ - "regex-automata", -] - [[package]] name = "matches" version = "0.1.10" @@ -4795,7 +5092,7 @@ version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -4858,12 +5155,6 @@ dependencies = [ "hash-db", ] -[[package]] -name = "memory_units" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" - [[package]] name = "merlin" version = "2.0.1" @@ -4876,6 +5167,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "merlin" +version = "3.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -4902,14 +5205,13 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -4989,7 +5291,7 @@ dependencies = [ "data-encoding", "log", "multibase", - "multihash 0.17.0", + "multihash", "percent-encoding", "serde", "static_assertions", @@ -5010,34 +5312,21 @@ dependencies = [ [[package]] name = "multihash" -version = "0.16.3" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" +checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" dependencies = [ "blake2b_simd", "blake2s_simd", "blake3", "core2", - "digest 0.10.6", + "digest 0.10.7", "multihash-derive", - "sha2 0.10.6", + "sha2 0.10.7", "sha3", "unsigned-varint", ] -[[package]] -name = "multihash" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" -dependencies = [ - "core2", - "digest 0.10.6", - "multihash-derive", - "sha2 0.10.6", - "unsigned-varint", -] - [[package]] name = "multihash-derive" version = "0.8.1" @@ -5204,8 +5493,8 @@ dependencies = [ name = "node-bench" version = "0.9.0-dev" dependencies = [ - "array-bytes 4.2.0", - "clap 4.2.7", + "array-bytes", + "clap 4.3.2", "derive_more", "fs_extra", "futures", @@ 
-5240,9 +5529,9 @@ dependencies = [ name = "node-cli" version = "3.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "assert_cmd", - "clap 4.2.7", + "clap 4.3.2", "clap_complete", "criterion", "frame-benchmarking-cli", @@ -5257,14 +5546,14 @@ dependencies = [ "node-inspect", "node-primitives", "node-rpc", + "pallet-asset-conversion-tx-payment", "pallet-asset-tx-payment", "pallet-assets", "pallet-balances", "pallet-im-online", "pallet-timestamp", - "pallet-transaction-payment", "parity-scale-codec", - "platforms 2.0.0", + "platforms", "rand 0.8.5", "regex", "sc-authority-discovery", @@ -5285,6 +5574,7 @@ dependencies = [ "sc-network-common", "sc-network-statement", "sc-network-sync", + "sc-offchain", "sc-rpc", "sc-service", "sc-service-test", @@ -5310,9 +5600,9 @@ dependencies = [ "sp-keyring", "sp-keystore", "sp-runtime", + "sp-statement-store", "sp-timestamp", "sp-tracing", - "sp-transaction-pool", "sp-transaction-storage-proof", "substrate-build-script-utils", "substrate-cli-test-utils", @@ -5367,11 +5657,10 @@ dependencies = [ name = "node-inspect" version = "0.9.0-dev" dependencies = [ - "clap 4.2.7", + "clap 4.3.2", "parity-scale-codec", "sc-cli", "sc-client-api", - "sc-executor", "sc-service", "sp-blockchain", "sp-core", @@ -5383,10 +5672,6 @@ dependencies = [ name = "node-primitives" version = "2.0.0" dependencies = [ - "frame-system", - "parity-scale-codec", - "scale-info", - "sp-application-crypto", "sp-core", "sp-runtime", ] @@ -5426,7 +5711,7 @@ dependencies = [ name = "node-runtime-generate-bags" version = "3.0.0" dependencies = [ - "clap 4.2.7", + "clap 4.3.2", "generate-bags", "kitchensink-runtime", ] @@ -5435,7 +5720,7 @@ dependencies = [ name = "node-template" version = "4.0.0-dev" dependencies = [ - "clap 4.2.7", + "clap 4.3.2", "frame-benchmarking", "frame-benchmarking-cli", "frame-system", @@ -5451,19 +5736,16 @@ dependencies = [ "sc-consensus-aura", "sc-consensus-grandpa", "sc-executor", - "sc-keystore", "sc-network", - "sc-rpc", 
+ "sc-offchain", "sc-rpc-api", "sc-service", - "sc-statement-store", "sc-telemetry", "sc-transaction-pool", "sc-transaction-pool-api", "sp-api", "sp-block-builder", "sp-blockchain", - "sp-consensus", "sp-consensus-aura", "sp-consensus-grandpa", "sp-core", @@ -5481,10 +5763,9 @@ dependencies = [ name = "node-template-release" version = "3.0.0" dependencies = [ - "clap 4.2.7", + "clap 4.3.2", "flate2", "fs_extra", - "git2", "glob", "itertools", "tar", @@ -5539,9 +5820,10 @@ dependencies = [ "log", "node-executor", "node-primitives", + "pallet-asset-conversion", + "pallet-asset-conversion-tx-payment", "pallet-asset-tx-payment", "pallet-assets", - "pallet-transaction-payment", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -5585,16 +5867,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" -[[package]] -name = "nu-ansi-term" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" -dependencies = [ - "overload", - "winapi", -] - [[package]] name = "num" version = "0.4.0" @@ -5679,7 +5951,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", - "libm 0.2.6", + "libm 0.2.7", ] [[package]] @@ -5700,25 +5972,16 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.29.0" +version = "0.30.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" +checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" dependencies = [ "crc32fast", - "hashbrown 0.12.3", + "hashbrown 0.13.2", "indexmap", "memchr", ] -[[package]] -name = "object" -version = "0.30.3" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" -dependencies = [ - "memchr", -] - [[package]] name = "oid-registry" version = "0.4.0" @@ -5739,9 +6002,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "oorandom" @@ -5767,18 +6030,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" -[[package]] -name = "openssl-sys" -version = "0.9.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "os_str_bytes" version = "6.5.0" @@ -5794,12 +6045,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "p256" version = "0.11.1" @@ -5808,7 +6053,7 @@ checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ "ecdsa 0.14.8", "elliptic-curve 0.12.3", - "sha2 0.10.6", + "sha2 0.10.7", ] [[package]] @@ -5819,7 +6064,7 @@ checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" dependencies = [ "ecdsa 0.14.8", "elliptic-curve 0.12.3", - "sha2 0.10.6", + "sha2 0.10.7", ] [[package]] @@ -5836,7 +6081,7 @@ dependencies = [ name = "pallet-alliance" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "frame-benchmarking", "frame-support", 
"frame-system", @@ -5853,6 +6098,45 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-asset-conversion" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-assets", + "pallet-balances", + "parity-scale-codec", + "primitive-types", + "scale-info", + "sp-api", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-asset-conversion-tx-payment" +version = "4.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "pallet-asset-conversion", + "pallet-assets", + "pallet-balances", + "pallet-transaction-payment", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-storage", +] + [[package]] name = "pallet-asset-rate" version = "4.0.0-dev" @@ -6003,6 +6287,8 @@ dependencies = [ name = "pallet-bags-list" version = "4.0.0-dev" dependencies = [ + "aquamarine", + "docify", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -6094,7 +6380,7 @@ dependencies = [ name = "pallet-beefy-mmr" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "binary-merkle-tree", "frame-support", "frame-system", @@ -6171,7 +6457,7 @@ dependencies = [ name = "pallet-contracts" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "assert_matches", "bitflags", "env_logger 0.9.3", @@ -6202,14 +6488,13 @@ dependencies = [ "sp-runtime", "sp-std", "wasm-instrument 0.4.0", - "wasmi 0.28.0", - "wasmparser-nostd", + "wasmi", "wat", ] [[package]] name = "pallet-contracts-primitives" -version = "7.0.0" +version = "24.0.0" dependencies = [ "bitflags", "parity-scale-codec", @@ -6225,7 +6510,7 @@ version = "4.0.0-dev" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -6264,6 +6549,20 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-default-config-example" +version = "4.0.0-dev" +dependencies = [ + 
"frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-democracy" version = "4.0.0-dev" @@ -6315,6 +6614,7 @@ dependencies = [ "pallet-staking", "pallet-timestamp", "parity-scale-codec", + "parking_lot 0.12.1", "scale-info", "sp-core", "sp-io", @@ -6377,6 +6677,7 @@ dependencies = [ "sp-io", "sp-npos-elections", "sp-runtime", + "sp-staking", "sp-std", "sp-tracing", "substrate-test-utils", @@ -6399,6 +6700,23 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-example-kitchensink" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-example-offchain-worker" version = "4.0.0-dev" @@ -6416,10 +6734,38 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-example-split" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-std", +] + +[[package]] +name = "pallet-examples" +version = "4.0.0-dev" +dependencies = [ + "pallet-default-config-example", + "pallet-dev-mode", + "pallet-example-basic", + "pallet-example-kitchensink", + "pallet-example-offchain-worker", + "pallet-example-split", +] + [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" dependencies = [ + "docify", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -6616,7 +6962,7 @@ dependencies = [ name = "pallet-mmr" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "env_logger 0.9.3", "frame-benchmarking", "frame-support", @@ -6642,6 +6988,24 @@ dependencies = [ "pallet-balances", "parity-scale-codec", "scale-info", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = 
"pallet-nft-fractionalization" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-assets", + "pallet-balances", + "pallet-nfts", + "parity-scale-codec", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -6856,10 +7220,37 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-staking", + "sp-staking", + "sp-std", +] + +[[package]] +name = "pallet-paged-list" +version = "0.1.0" +dependencies = [ + "docify", + "frame-benchmarking", + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", "sp-std", ] +[[package]] +name = "pallet-paged-list-fuzzer" +version = "0.1.0" +dependencies = [ + "arbitrary", + "frame-support", + "honggfuzz", + "pallet-paged-list", + "sp-io", +] + [[package]] name = "pallet-preimage" version = "4.0.0-dev" @@ -7021,6 +7412,7 @@ dependencies = [ name = "pallet-scheduler" version = "4.0.0-dev" dependencies = [ + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -7098,13 +7490,16 @@ dependencies = [ name = "pallet-society" version = "4.0.0-dev" dependencies = [ + "frame-benchmarking", "frame-support", "frame-support-test", "frame-system", + "log", "pallet-balances", "parity-scale-codec", "rand_chacha 0.2.2", "scale-info", + "sp-arithmetic", "sp-core", "sp-io", "sp-runtime", @@ -7149,7 +7544,7 @@ dependencies = [ "proc-macro2", "quote", "sp-runtime", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -7325,7 +7720,7 @@ dependencies = [ name = "pallet-transaction-storage" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "frame-benchmarking", "frame-support", "frame-system", @@ -7454,9 +7849,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.5.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ddb756ca205bd108aee3c62c6d3c994e1df84a59b9d6d4a5ea42ee1fd5a9a28" +checksum = 
"2287753623c76f953acd29d15d8100bcab84d29db78fb6f352adb3c53e83b967" dependencies = [ "arrayvec 0.7.2", "bitvec", @@ -7469,9 +7864,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.4" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" +checksum = "2b6937b5e67bfba3351b87b040d48352a2fcb6ad72f81855412ce97b45c8f110" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -7515,7 +7910,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] @@ -7534,17 +7929,23 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.2.16", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.45.0", + "windows-targets 0.48.0", ] +[[package]] +name = "partial_sort" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7924d1d0ad836f665c9065e26d016c673ece3993f30d340068b16f282afc1156" + [[package]] name = "paste" version = "1.0.12" @@ -7566,7 +7967,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -7595,9 +7996,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" @@ -7629,7 +8030,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -7640,7 +8041,7 @@ checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411" dependencies = [ "once_cell", "pest", - "sha2 0.10.6", + "sha2 0.10.7", ] [[package]] @@ -7655,22 +8056,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -7707,7 +8108,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.5", + "der 0.7.6", "spki 0.7.2", ] @@ -7717,12 +8118,6 @@ version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" -[[package]] -name = "platforms" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" - [[package]] name = "platforms" version = "3.0.2" @@ -7805,23 +8200,14 @@ dependencies = [ "cfg-if", "cpufeatures", 
"opaque-debug 0.3.0", - "universal-hash 0.5.0", -] - -[[package]] -name = "portable-atomic" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" -dependencies = [ - "portable-atomic 1.3.1", + "universal-hash 0.5.1", ] [[package]] name = "portable-atomic" -version = "1.3.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bbda379e6e462c97ea6afe9f6233619b202bbc4968d7caa6917788d2070a044" +checksum = "767eb9f07d4a5ebcb39bbf2d452058a93c011373abf6832e24194a1c3f004794" [[package]] name = "ppv-lite86" @@ -7895,12 +8281,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" +checksum = "3b69d39aab54d069e7f2fe8cb970493e7834601ca2d8c65fd7bbd183578080d1" dependencies = [ "proc-macro2", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -7911,6 +8297,7 @@ checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" dependencies = [ "fixed-hash", "impl-codec", + "impl-num-traits", "impl-serde", "scale-info", "uint", @@ -7950,22 +8337,28 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-hack" +version = "0.5.20+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" + [[package]] name = "proc-macro-warning" -version = "0.3.1" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e99670bafb56b9a106419397343bdbc8b8742c3cc449fec6345f86173f47cd4" +checksum = "70550716265d1ec349c41f70dd4f964b4fd88394efe4405f0c1da679c4799a07" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "proc-macro2" -version = "1.0.56" +version = 
"1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" dependencies = [ "unicode-ident", ] @@ -8126,7 +8519,7 @@ checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" dependencies = [ "bytes", "rand 0.8.5", - "ring", + "ring 0.16.20", "rustc-hash", "rustls 0.20.8", "slab", @@ -8138,9 +8531,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ "proc-macro2", ] @@ -8276,7 +8669,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" dependencies = [ "pem", - "ring", + "ring 0.16.20", "time 0.3.21", "x509-parser 0.13.2", "yasna", @@ -8289,7 +8682,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", - "ring", + "ring 0.16.20", "time 0.3.21", "yasna", ] @@ -8340,14 +8733,14 @@ checksum = "8d2275aab483050ab2a7364c1a46604865ee7d6906684e08db0f090acf74f9e7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "regalloc2" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300d4fbfb40c1c66a78ba3ddd41c1110247cf52f97b87d0f2fc9209bd49b030c" +checksum = "80535183cae11b149d618fbd3c37e38d7cda589d82d7769e196ca9a9042d7621" dependencies = [ "fxhash", "log", @@ -8357,13 +8750,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.1" +version = "1.8.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ - "aho-corasick 1.0.1", + "aho-corasick 1.0.2", "memchr", - "regex-syntax 0.7.1", + "regex-syntax 0.7.2", ] [[package]] @@ -8383,21 +8776,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" - -[[package]] -name = "region" -version = "3.0.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76e189c2369884dce920945e2ddf79b3dff49e071a167dd1817fa9c4c00d512e" -dependencies = [ - "bitflags", - "libc", - "mach", - "winapi", -] +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" [[package]] name = "resolv-conf" @@ -8430,6 +8811,21 @@ dependencies = [ "subtle", ] +[[package]] +name = "ring" +version = "0.1.0" +source = "git+https://github.com/w3f/ring-proof#0e948f3c28cbacecdd3020403c4841c0eb339213" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "common", + "fflonk", + "merlin 3.0.0", +] + [[package]] name = "ring" version = "0.16.20" @@ -8563,9 +8959,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.13" +version = "0.36.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a38f9520be93aba504e8ca974197f46158de5dcaa9fa04b57c57cd6a679d658" +checksum = "14e4d67015953998ad0eb82887a0eb0129e18a7e2f3b7b0f6c422fddcd503d62" dependencies = [ "bitflags", "errno", @@ -8585,7 +8981,7 @@ dependencies = [ "errno", "io-lifetimes", "libc", - "linux-raw-sys 0.3.7", + "linux-raw-sys 0.3.8", "windows-sys 0.48.0", ] @@ -8597,7 +8993,7 @@ checksum = 
"35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ "base64 0.13.1", "log", - "ring", + "ring 0.16.20", "sct 0.6.1", "webpki 0.21.4", ] @@ -8609,11 +9005,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", - "ring", + "ring 0.16.20", "sct 0.7.0", "webpki 0.22.0", ] +[[package]] +name = "rustls" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" +dependencies = [ + "log", + "ring 0.16.20", + "rustls-webpki", + "sct 0.7.0", +] + [[package]] name = "rustls-native-certs" version = "0.6.2" @@ -8632,7 +9040,17 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", +] + +[[package]] +name = "rustls-webpki" +version = "0.100.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +dependencies = [ + "ring 0.16.20", + "untrusted", ] [[package]] @@ -8716,7 +9134,7 @@ dependencies = [ "ip_network", "libp2p", "log", - "multihash 0.17.0", + "multihash", "parity-scale-codec", "prost", "prost-build", @@ -8724,7 +9142,6 @@ dependencies = [ "rand 0.8.5", "sc-client-api", "sc-network", - "sc-network-common", "sp-api", "sp-authority-discovery", "sp-blockchain", @@ -8803,16 +9220,16 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "sc-cli" version = "0.10.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "chrono", - "clap 4.2.7", + "clap 4.3.2", "fdlimit", "futures", "futures-timer", @@ -8827,7 +9244,6 @@ dependencies = [ "sc-client-db", "sc-keystore", "sc-network", - "sc-network-common", 
"sc-service", "sc-telemetry", "sc-tracing", @@ -8866,7 +9282,6 @@ dependencies = [ "sp-core", "sp-database", "sp-externalities", - "sp-keystore", "sp-runtime", "sp-state-machine", "sp-statement-store", @@ -8881,7 +9296,7 @@ dependencies = [ name = "sc-client-db" version = "0.10.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "criterion", "hash-db", "kitchensink-runtime", @@ -8992,10 +9407,10 @@ dependencies = [ "sc-consensus", "sc-consensus-epochs", "sc-consensus-slots", - "sc-keystore", "sc-network", "sc-network-test", "sc-telemetry", + "sc-transaction-pool-api", "scale-info", "sp-api", "sp-application-crypto", @@ -9028,6 +9443,7 @@ dependencies = [ "sc-consensus-epochs", "sc-keystore", "sc-rpc-api", + "sc-transaction-pool-api", "serde", "serde_json", "sp-api", @@ -9048,7 +9464,8 @@ dependencies = [ name = "sc-consensus-beefy" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", + "async-channel", "async-trait", "fnv", "futures", @@ -9058,9 +9475,7 @@ dependencies = [ "sc-block-builder", "sc-client-api", "sc-consensus", - "sc-keystore", "sc-network", - "sc-network-common", "sc-network-gossip", "sc-network-sync", "sc-network-test", @@ -9125,7 +9540,7 @@ name = "sc-consensus-grandpa" version = "0.10.0-dev" dependencies = [ "ahash 0.8.3", - "array-bytes 4.2.0", + "array-bytes", "assert_matches", "async-trait", "dyn-clone", @@ -9146,6 +9561,7 @@ dependencies = [ "sc-network-gossip", "sc-network-test", "sc-telemetry", + "sc-transaction-pool-api", "sc-utils", "serde", "serde_json", @@ -9279,21 +9695,20 @@ dependencies = [ name = "sc-executor" version = "0.10.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "assert_matches", "criterion", "env_logger 0.9.3", - "lru 0.8.1", "num_cpus", "parity-scale-codec", "parking_lot 0.12.1", "paste", "regex", "sc-executor-common", - "sc-executor-wasmi", "sc-executor-wasmtime", "sc-runtime-test", "sc-tracing", + "schnellru", "sp-api", "sp-core", "sp-externalities", @@ -9310,8 
+9725,7 @@ dependencies = [ "substrate-test-runtime", "tempfile", "tracing", - "tracing-subscriber 0.2.25", - "wasmi 0.13.2", + "tracing-subscriber", "wat", ] @@ -9324,19 +9738,6 @@ dependencies = [ "sp-wasm-interface", "thiserror", "wasm-instrument 0.3.0", - "wasmi 0.13.2", -] - -[[package]] -name = "sc-executor-wasmi" -version = "0.10.0-dev" -dependencies = [ - "log", - "sc-allocator", - "sc-executor-common", - "sp-runtime-interface", - "sp-wasm-interface", - "wasmi 0.13.2", ] [[package]] @@ -9348,10 +9749,9 @@ dependencies = [ "cfg-if", "libc", "log", - "once_cell", "parity-scale-codec", "paste", - "rustix 0.36.13", + "rustix 0.36.14", "sc-allocator", "sc-executor-common", "sc-runtime-test", @@ -9382,7 +9782,7 @@ dependencies = [ name = "sc-keystore" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "parking_lot 0.12.1", "serde_json", "sp-application-crypto", @@ -9396,7 +9796,7 @@ dependencies = [ name = "sc-network" version = "0.10.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "assert_matches", "async-channel", "async-trait", @@ -9410,28 +9810,23 @@ dependencies = [ "libp2p", "linked_hash_set", "log", - "lru 0.8.1", "mockall", "multistream-select", "parity-scale-codec", "parking_lot 0.12.1", + "partial_sort", "pin-project", "rand 0.8.5", - "sc-block-builder", "sc-client-api", - "sc-consensus", "sc-network-common", "sc-network-light", "sc-network-sync", - "sc-peerset", "sc-utils", "serde", "serde_json", "smallvec", - "snow", "sp-arithmetic", "sp-blockchain", - "sp-consensus", "sp-core", "sp-runtime", "sp-test-primitives", @@ -9445,6 +9840,7 @@ dependencies = [ "tokio-test", "tokio-util", "unsigned-varint", + "wasm-timer", "zeroize", ] @@ -9452,6 +9848,7 @@ dependencies = [ name = "sc-network-bitswap" version = "0.10.0-dev" dependencies = [ + "async-channel", "cid", "futures", "libp2p-identity", @@ -9462,7 +9859,6 @@ dependencies = [ "sc-client-api", "sc-consensus", "sc-network", - "sc-network-common", 
"sp-blockchain", "sp-consensus", "sp-core", @@ -9478,28 +9874,17 @@ dependencies = [ name = "sc-network-common" version = "0.10.0-dev" dependencies = [ - "array-bytes 4.2.0", "async-trait", "bitflags", - "bytes", "futures", - "futures-timer", "libp2p-identity", "parity-scale-codec", "prost-build", "sc-consensus", - "sc-peerset", - "sc-utils", - "serde", - "smallvec", - "sp-blockchain", "sp-consensus", "sp-consensus-grandpa", "sp-runtime", - "substrate-prometheus-endpoint", "tempfile", - "thiserror", - "zeroize", ] [[package]] @@ -9511,11 +9896,10 @@ dependencies = [ "futures-timer", "libp2p", "log", - "lru 0.8.1", "quickcheck", "sc-network", "sc-network-common", - "sc-peerset", + "schnellru", "sp-runtime", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -9527,7 +9911,8 @@ dependencies = [ name = "sc-network-light" version = "0.10.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", + "async-channel", "futures", "libp2p-identity", "log", @@ -9536,8 +9921,6 @@ dependencies = [ "prost-build", "sc-client-api", "sc-network", - "sc-network-common", - "sc-peerset", "sp-blockchain", "sp-core", "sp-runtime", @@ -9548,18 +9931,15 @@ dependencies = [ name = "sc-network-statement" version = "0.10.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "async-channel", "futures", "libp2p", "log", "parity-scale-codec", - "pin-project", "sc-network", "sc-network-common", - "sc-peerset", "sp-consensus", - "sp-runtime", "sp-statement-store", "substrate-prometheus-endpoint", ] @@ -9568,14 +9948,14 @@ dependencies = [ name = "sc-network-sync" version = "0.10.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", + "async-channel", "async-trait", "fork-tree", "futures", "futures-timer", "libp2p", "log", - "lru 0.8.1", "mockall", "parity-scale-codec", "prost", @@ -9586,8 +9966,8 @@ dependencies = [ "sc-consensus", "sc-network", "sc-network-common", - "sc-peerset", "sc-utils", + "schnellru", "smallvec", "sp-arithmetic", "sp-blockchain", 
@@ -9625,7 +10005,6 @@ dependencies = [ "sc-utils", "sp-blockchain", "sp-consensus", - "sp-consensus-babe", "sp-core", "sp-runtime", "sp-tracing", @@ -9638,15 +10017,13 @@ dependencies = [ name = "sc-network-transactions" version = "0.10.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "futures", "libp2p", "log", "parity-scale-codec", - "pin-project", "sc-network", "sc-network-common", - "sc-peerset", "sc-utils", "sp-consensus", "sp-runtime", @@ -9657,15 +10034,16 @@ dependencies = [ name = "sc-offchain" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "bytes", "fnv", "futures", "futures-timer", "hyper", - "hyper-rustls", + "hyper-rustls 0.24.0", "lazy_static", "libp2p", + "log", "num_cpus", "once_cell", "parity-scale-codec", @@ -9676,13 +10054,14 @@ dependencies = [ "sc-client-db", "sc-network", "sc-network-common", - "sc-peerset", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", "sp-api", "sp-consensus", "sp-core", + "sp-externalities", + "sp-keystore", "sp-offchain", "sp-runtime", "sp-tracing", @@ -9692,19 +10071,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "sc-peerset" -version = "4.0.0-dev" -dependencies = [ - "futures", - "libp2p-identity", - "log", - "rand 0.8.5", - "sc-utils", - "serde_json", - "wasm-timer", -] - [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" @@ -9724,6 +10090,7 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot 0.12.1", + "pretty_assertions", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -9787,7 +10154,7 @@ dependencies = [ name = "sc-rpc-spec-v2" version = "0.10.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "assert_matches", "futures", "futures-util", @@ -9796,6 +10163,7 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot 0.12.1", + "pretty_assertions", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -9859,11 +10227,9 @@ dependencies = [ "sc-network-light", "sc-network-sync", 
"sc-network-transactions", - "sc-offchain", "sc-rpc", "sc-rpc-server", "sc-rpc-spec-v2", - "sc-storage-monitor", "sc-sysinfo", "sc-telemetry", "sc-tracing", @@ -9901,7 +10267,7 @@ dependencies = [ name = "sc-service-test" version = "2.0.0" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "async-channel", "fdlimit", "futures", @@ -9914,7 +10280,6 @@ dependencies = [ "sc-consensus", "sc-executor", "sc-network", - "sc-network-common", "sc-network-sync", "sc-service", "sc-transaction-pool-api", @@ -9948,21 +10313,17 @@ dependencies = [ name = "sc-statement-store" version = "4.0.0-dev" dependencies = [ - "async-trait", "env_logger 0.9.3", - "futures", - "futures-timer", "log", "parity-db", - "parity-scale-codec", "parking_lot 0.12.1", "sc-client-api", + "sc-keystore", "sp-api", "sp-blockchain", "sp-core", "sp-runtime", "sp-statement-store", - "sp-tracing", "substrate-prometheus-endpoint", "tempfile", "tokio", @@ -9972,12 +10333,10 @@ dependencies = [ name = "sc-storage-monitor" version = "0.1.0" dependencies = [ - "clap 4.2.7", + "clap 4.3.2", "fs4", - "futures", "log", "sc-client-db", - "sc-utils", "sp-core", "thiserror", "tokio", @@ -10049,12 +10408,10 @@ dependencies = [ "lazy_static", "libc", "log", - "once_cell", "parking_lot 0.12.1", "regex", "rustc-hash", "sc-client-api", - "sc-rpc-server", "sc-tracing-proc-macro", "serde", "sp-api", @@ -10066,7 +10423,7 @@ dependencies = [ "thiserror", "tracing", "tracing-log", - "tracing-subscriber 0.2.25", + "tracing-subscriber", ] [[package]] @@ -10076,14 +10433,14 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "assert_matches", "async-trait", "criterion", @@ -10091,7 +10448,6 @@ dependencies = [ "futures-timer", "linked-hash-map", "log", - "num-traits", "parity-scale-codec", "parking_lot 0.12.1", "sc-block-builder", @@ -10120,9 +10476,11 @@ 
dependencies = [ "async-trait", "futures", "log", + "parity-scale-codec", "serde", "serde_json", "sp-blockchain", + "sp-core", "sp-runtime", "thiserror", ] @@ -10144,9 +10502,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfdef77228a4c05dc94211441595746732131ad7f6530c6c18f045da7b7ab937" +checksum = "b569c32c806ec3abdf3b5869fb8bf1e0d275a7c1c9b0b05603d9464632649edf" dependencies = [ "bitvec", "cfg-if", @@ -10170,11 +10528,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -10198,7 +10556,7 @@ dependencies = [ "arrayvec 0.5.2", "curve25519-dalek 2.1.3", "getrandom 0.1.16", - "merlin", + "merlin 2.0.1", "rand 0.7.3", "rand_core 0.5.1", "sha2 0.8.2", @@ -10224,7 +10582,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -10234,7 +10592,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -10271,7 +10629,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0aec48e813d6b90b15f0b8948af3c63483992dee44c03e9930b3eebdabe046e" dependencies = [ "base16ct 0.2.0", - "der 0.7.5", + "der 0.7.6", "generic-array 0.14.7", "pkcs8 0.10.2", "subtle", @@ -10307,9 +10665,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.8.2" 
+version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags", "core-foundation", @@ -10320,9 +10678,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -10363,22 +10721,22 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.162" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71b2f6e1ab5c2b98c05f0f35b236b22e8df7ead6ffbf51d7808da7f8817e7ab6" +checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.162" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a0814352fd64b58489904a44ea8d90cb1a91dcb6b4f5ebabc32c8318e93cb6" +checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -10394,9 +10752,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d" dependencies = [ "serde", ] @@ -10422,7 +10780,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", 
"cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -10452,13 +10810,13 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -10467,7 +10825,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "keccak", ] @@ -10501,7 +10859,7 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "rand_core 0.6.4", ] @@ -10511,7 +10869,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "rand_core 0.6.4", ] @@ -10551,9 +10909,9 @@ checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7" [[package]] name = "smallvec" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" +checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" [[package]] name = "snap" @@ -10572,9 +10930,9 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek 4.0.0-rc.1", "rand_core 0.6.4", - "ring", + "ring 0.16.20", "rustc_version 0.4.0", - "sha2 0.10.6", + "sha2 0.10.7", "subtle", ] @@ -10615,6 +10973,7 @@ dependencies = [ "scale-info", "sp-api-proc-macro", "sp-core", + 
"sp-externalities", "sp-metadata-ir", "sp-runtime", "sp-state-machine", @@ -10636,7 +10995,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -10649,6 +11008,7 @@ dependencies = [ "parity-scale-codec", "rustversion", "sc-block-builder", + "scale-info", "sp-api", "sp-consensus", "sp-core", @@ -10656,62 +11016,154 @@ dependencies = [ "sp-state-machine", "sp-tracing", "sp-version", + "static_assertions", "substrate-test-runtime-client", "trybuild", ] [[package]] name = "sp-application-crypto" -version = "7.0.0" +version = "23.0.0" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-application-crypto-test" +version = "2.0.0" +dependencies = [ + "sp-api", + "sp-application-crypto", + "sp-core", + "sp-keystore", + "substrate-test-runtime-client", +] + +[[package]] +name = "sp-arithmetic" +version = "16.0.0" +dependencies = [ + "criterion", + "integer-sqrt", + "num-traits", + "parity-scale-codec", + "primitive-types", + "rand 0.8.5", + "scale-info", + "serde", + "sp-core", + "sp-std", + "static_assertions", +] + +[[package]] +name = "sp-arithmetic-fuzzer" +version = "2.0.0" +dependencies = [ + "arbitrary", + "fraction", + "honggfuzz", + "num-bigint", + "sp-arithmetic", +] + +[[package]] +name = "sp-ark-bls12-377" +version = "0.4.0-beta" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e61a06f286f4e8565a67865ef52e83edabf447881898c94527ffc7b839177" +dependencies = [ + "ark-bls12-377", + "ark-ff", + "ark-r1cs-std", + "ark-scale", + "ark-std", + "parity-scale-codec", + "sp-ark-models", +] + +[[package]] +name = "sp-ark-bls12-381" +version = "0.4.0-beta" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3352feef6c9c34022fa766a0c9a86a88a83d280a3e5b34781a1a9af98377a130" +dependencies = [ + "ark-bls12-381", + "ark-ff", + "ark-scale", + "ark-serialize", + "ark-std", + 
"parity-scale-codec", + "sp-ark-models", +] + +[[package]] +name = "sp-ark-bw6-761" +version = "0.4.0-beta" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bf069165e230aef3c4680edea2d8ab3caa89c039e0b61fad2b8e061fb393668" dependencies = [ + "ark-bw6-761", + "ark-ff", + "ark-scale", + "ark-std", "parity-scale-codec", - "scale-info", - "serde", - "sp-core", - "sp-io", - "sp-std", + "sp-ark-models", ] [[package]] -name = "sp-application-crypto-test" -version = "2.0.0" +name = "sp-ark-ed-on-bls12-377" +version = "0.4.0-beta" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e63f1fe8e7e87cb0258d61212b019d4d0fd230293ec42a564eb671c83d437497" dependencies = [ - "sp-api", - "sp-application-crypto", - "sp-core", - "sp-keystore", - "sp-runtime", - "substrate-test-runtime-client", + "ark-ed-on-bls12-377", + "ark-ff", + "ark-r1cs-std", + "ark-scale", + "ark-serialize", + "ark-std", + "parity-scale-codec", + "sp-ark-models", ] [[package]] -name = "sp-arithmetic" -version = "6.0.0" +name = "sp-ark-ed-on-bls12-381-bandersnatch" +version = "0.4.0-beta" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "838ddc5508aff3e89f930e7e7f3565d0786ac27868cfd61587afe681011e1140" dependencies = [ - "criterion", - "integer-sqrt", - "num-traits", + "ark-ec", + "ark-ed-on-bls12-381-bandersnatch", + "ark-ff", + "ark-r1cs-std", + "ark-scale", + "ark-std", "parity-scale-codec", - "primitive-types", - "rand 0.8.5", - "scale-info", - "serde", - "sp-core", - "sp-std", - "static_assertions", + "sp-ark-bls12-381", + "sp-ark-models", ] [[package]] -name = "sp-arithmetic-fuzzer" -version = "2.0.0" +name = "sp-ark-models" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28fa906b809d7a346b2aa32a4bd0c884a75f9f588f9a4a07272f63eaf8a10765" dependencies = [ - "arbitrary", - "fraction", - "honggfuzz", - "num-bigint", + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", + 
"derivative", + "getrandom 0.2.9", + "itertools", "num-traits", - "primitive-types", - "sp-arithmetic", + "zeroize", ] [[package]] @@ -10730,7 +11182,6 @@ dependencies = [ name = "sp-block-builder" version = "4.0.0-dev" dependencies = [ - "parity-scale-codec", "sp-api", "sp-inherents", "sp-runtime", @@ -10743,9 +11194,9 @@ version = "4.0.0-dev" dependencies = [ "futures", "log", - "lru 0.8.1", "parity-scale-codec", "parking_lot 0.12.1", + "schnellru", "sp-api", "sp-consensus", "sp-database", @@ -10778,7 +11229,6 @@ dependencies = [ "scale-info", "sp-api", "sp-application-crypto", - "sp-consensus", "sp-consensus-slots", "sp-inherents", "sp-runtime", @@ -10796,11 +11246,9 @@ dependencies = [ "serde", "sp-api", "sp-application-crypto", - "sp-consensus", "sp-consensus-slots", "sp-core", "sp-inherents", - "sp-keystore", "sp-runtime", "sp-std", "sp-timestamp", @@ -10810,7 +11258,7 @@ dependencies = [ name = "sp-consensus-beefy" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "lazy_static", "parity-scale-codec", "scale-info", @@ -10819,11 +11267,11 @@ dependencies = [ "sp-application-crypto", "sp-core", "sp-io", - "sp-keystore", "sp-mmr-primitives", "sp-runtime", "sp-std", "strum", + "w3f-bls", ] [[package]] @@ -10867,9 +11315,11 @@ dependencies = [ [[package]] name = "sp-core" -version = "7.0.0" +version = "21.0.0" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", + "arrayvec 0.7.2", + "bandersnatch_vrfs", "bitflags", "blake2", "bounded-collections", @@ -10880,12 +11330,11 @@ dependencies = [ "futures", "hash-db", "hash256-std-hasher", - "hex-literal", "impl-serde", "lazy_static", "libsecp256k1", "log", - "merlin", + "merlin 2.0.1", "parity-scale-codec", "parking_lot 0.12.1", "paste", @@ -10909,31 +11358,57 @@ dependencies = [ "substrate-bip39", "thiserror", "tiny-bip39", + "tracing", "w3f-bls", "zeroize", ] [[package]] name = "sp-core-hashing" -version = "5.0.0" +version = "9.0.0" dependencies = [ "blake2b_simd", "byteorder", - 
"digest 0.10.6", - "sha2 0.10.6", + "digest 0.10.7", + "sha2 0.10.7", "sha3", - "sp-std", "twox-hash", ] [[package]] name = "sp-core-hashing-proc-macro" -version = "5.0.0" +version = "9.0.0" dependencies = [ - "proc-macro2", "quote", "sp-core-hashing", - "syn 2.0.15", + "syn 2.0.18", +] + +[[package]] +name = "sp-crypto-ec-utils" +version = "0.4.0" +dependencies = [ + "ark-algebra-test-templates", + "ark-bls12-377", + "ark-bls12-381", + "ark-bw6-761", + "ark-ec", + "ark-ed-on-bls12-377", + "ark-ed-on-bls12-381-bandersnatch", + "ark-ff", + "ark-scale", + "ark-serialize", + "ark-std", + "parity-scale-codec", + "sp-ark-bls12-377", + "sp-ark-bls12-381", + "sp-ark-bw6-761", + "sp-ark-ed-on-bls12-377", + "sp-ark-ed-on-bls12-381-bandersnatch", + "sp-ark-models", + "sp-io", + "sp-runtime-interface", + "sp-std", ] [[package]] @@ -10946,16 +11421,16 @@ dependencies = [ [[package]] name = "sp-debug-derive" -version = "5.0.0" +version = "8.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "sp-externalities" -version = "0.13.0" +version = "0.19.0" dependencies = [ "environmental", "parity-scale-codec", @@ -10963,6 +11438,16 @@ dependencies = [ "sp-storage", ] +[[package]] +name = "sp-genesis-builder" +version = "0.1.0" +dependencies = [ + "serde_json", + "sp-api", + "sp-runtime", + "sp-std", +] + [[package]] name = "sp-inherents" version = "4.0.0-dev" @@ -10972,7 +11457,6 @@ dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", "scale-info", - "sp-core", "sp-runtime", "sp-std", "thiserror", @@ -10980,12 +11464,11 @@ dependencies = [ [[package]] name = "sp-io" -version = "7.0.0" +version = "23.0.0" dependencies = [ "bytes", "ed25519", "ed25519-dalek", - "futures", "libsecp256k1", "log", "parity-scale-codec", @@ -11005,7 +11488,7 @@ dependencies = [ [[package]] name = "sp-keyring" -version = "7.0.0" +version = "24.0.0" dependencies = [ "lazy_static", "sp-core", @@ -11015,14 +11498,12 @@ dependencies = [ [[package]] name = 
"sp-keystore" -version = "0.13.0" +version = "0.27.0" dependencies = [ - "futures", "parity-scale-codec", "parking_lot 0.12.1", "rand 0.7.3", "rand_chacha 0.2.2", - "serde", "sp-core", "sp-externalities", "thiserror", @@ -11050,7 +11531,7 @@ dependencies = [ name = "sp-mmr-primitives" version = "4.0.0-dev" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "ckb-merkle-mountain-range", "log", "parity-scale-codec", @@ -11083,11 +11564,9 @@ dependencies = [ name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.2.7", + "clap 4.3.2", "honggfuzz", - "parity-scale-codec", "rand 0.8.5", - "scale-info", "sp-npos-elections", "sp-runtime", ] @@ -11103,7 +11582,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" -version = "5.0.0" +version = "8.0.0" dependencies = [ "backtrace", "lazy_static", @@ -11122,7 +11601,7 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "7.0.0" +version = "24.0.0" dependencies = [ "either", "hash256-std-hasher", @@ -11149,7 +11628,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" -version = "7.0.0" +version = "17.0.0" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -11172,13 +11651,13 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" -version = "6.0.0" +version = "11.0.0" dependencies = [ "Inflector", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -11216,7 +11695,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime-interface", - "sp-std", "substrate-wasm-builder", ] @@ -11228,6 +11706,7 @@ dependencies = [ "scale-info", "sp-api", "sp-core", + "sp-keystore", "sp-runtime", "sp-staking", "sp-std", @@ -11237,6 +11716,7 @@ dependencies = [ name = "sp-staking" version = "4.0.0-dev" dependencies = [ + "impl-trait-for-tuples", "parity-scale-codec", "scale-info", "serde", @@ -11247,9 +11727,9 @@ dependencies = [ [[package]] name = "sp-state-machine" -version = "0.13.0" +version = "0.28.0" dependencies = [ - 
"array-bytes 4.2.0", + "array-bytes", "assert_matches", "hash-db", "log", @@ -11273,9 +11753,14 @@ dependencies = [ name = "sp-statement-store" version = "4.0.0-dev" dependencies = [ - "log", + "aes-gcm 0.10.2", + "curve25519-dalek 3.2.0", + "ed25519-dalek", + "hkdf", "parity-scale-codec", + "rand 0.8.5", "scale-info", + "sha2 0.10.7", "sp-api", "sp-application-crypto", "sp-core", @@ -11284,15 +11769,16 @@ dependencies = [ "sp-runtime-interface", "sp-std", "thiserror", + "x25519-dalek 2.0.0-pre.1", ] [[package]] name = "sp-std" -version = "5.0.0" +version = "8.0.0" [[package]] name = "sp-storage" -version = "7.0.0" +version = "13.0.0" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11320,8 +11806,6 @@ name = "sp-timestamp" version = "4.0.0-dev" dependencies = [ "async-trait", - "futures-timer", - "log", "parity-scale-codec", "sp-inherents", "sp-runtime", @@ -11331,13 +11815,13 @@ dependencies = [ [[package]] name = "sp-tracing" -version = "6.0.0" +version = "10.0.0" dependencies = [ "parity-scale-codec", "sp-std", "tracing", "tracing-core", - "tracing-subscriber 0.2.25", + "tracing-subscriber", ] [[package]] @@ -11353,7 +11837,6 @@ name = "sp-transaction-storage-proof" version = "4.0.0-dev" dependencies = [ "async-trait", - "log", "parity-scale-codec", "scale-info", "sp-core", @@ -11365,10 +11848,10 @@ dependencies = [ [[package]] name = "sp-trie" -version = "7.0.0" +version = "22.0.0" dependencies = [ "ahash 0.8.3", - "array-bytes 4.2.0", + "array-bytes", "criterion", "hash-db", "hashbrown 0.13.2", @@ -11392,7 +11875,7 @@ dependencies = [ [[package]] name = "sp-version" -version = "5.0.0" +version = "22.0.0" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11408,31 +11891,30 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" -version = "4.0.0-dev" +version = "8.0.0" dependencies = [ "parity-scale-codec", "proc-macro2", "quote", "sp-version", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "sp-wasm-interface" -version = "7.0.0" 
+version = "14.0.0" dependencies = [ "anyhow", "impl-trait-for-tuples", "log", "parity-scale-codec", "sp-std", - "wasmi 0.13.2", "wasmtime", ] [[package]] name = "sp-weights" -version = "4.0.0" +version = "20.0.0" dependencies = [ "parity-scale-codec", "scale-info", @@ -11484,7 +11966,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", - "der 0.7.5", + "der 0.7.6", ] [[package]] @@ -11581,7 +12063,7 @@ dependencies = [ "lazy_static", "md-5", "rand 0.8.5", - "ring", + "ring 0.16.20", "subtle", "thiserror", "tokio", @@ -11593,8 +12075,28 @@ dependencies = [ name = "subkey" version = "3.0.0" dependencies = [ - "clap 4.2.7", + "clap 4.3.2", + "sc-cli", +] + +[[package]] +name = "substrate" +version = "0.0.0" +dependencies = [ + "aquamarine", + "chain-spec-builder", + "frame-support", + "node-cli", "sc-cli", + "sc-consensus-aura", + "sc-consensus-babe", + "sc-consensus-beefy", + "sc-consensus-grandpa", + "sc-consensus-manual-seal", + "sc-consensus-pow", + "sc-service", + "sp-runtime", + "subkey", ] [[package]] @@ -11613,9 +12115,6 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -dependencies = [ - "platforms 2.0.0", -] [[package]] name = "substrate-cli-test-utils" @@ -11624,10 +12123,13 @@ dependencies = [ "assert_cmd", "futures", "nix 0.26.2", + "node-cli", "node-primitives", "regex", + "sc-cli", + "sc-service", + "sp-rpc", "substrate-rpc-client", - "tempfile", "tokio", ] @@ -11635,7 +12137,7 @@ dependencies = [ name = "substrate-frame-cli" version = "4.0.0-dev" dependencies = [ - "clap 4.2.7", + "clap 4.3.2", "frame-support", "frame-system", "sc-cli", @@ -11713,11 +12215,9 @@ name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" dependencies = [ "jsonrpsee", - "log", "parity-scale-codec", "sc-client-api", "sc-rpc-api", - "scale-info", "serde", "serde_json", "sp-core", @@ -11731,7 +12231,7 @@ 
dependencies = [ name = "substrate-test-client" version = "2.0.1" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "async-trait", "futures", "parity-scale-codec", @@ -11756,43 +12256,40 @@ dependencies = [ name = "substrate-test-runtime" version = "2.0.0" dependencies = [ - "array-bytes 6.1.0", + "array-bytes", "frame-executive", "frame-support", "frame-system", "frame-system-rpc-runtime-api", "futures", + "json-patch", "log", - "memory-db", "pallet-babe", "pallet-balances", - "pallet-beefy-mmr", - "pallet-root-testing", - "pallet-sudo", "pallet-timestamp", "parity-scale-codec", "sc-block-builder", "sc-executor", + "sc-executor-common", "sc-service", "scale-info", "serde", + "serde_json", "sp-api", "sp-application-crypto", "sp-block-builder", "sp-consensus", "sp-consensus-aura", "sp-consensus-babe", - "sp-consensus-beefy", "sp-consensus-grandpa", "sp-core", - "sp-debug-derive", "sp-externalities", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-offchain", "sp-runtime", - "sp-runtime-interface", "sp-session", "sp-state-machine", "sp-std", @@ -11810,9 +12307,7 @@ name = "substrate-test-runtime-client" version = "2.0.0" dependencies = [ "futures", - "parity-scale-codec", "sc-block-builder", - "sc-chain-spec", "sc-client-api", "sc-consensus", "sp-api", @@ -11857,7 +12352,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -11877,10 +12372,11 @@ dependencies = [ "build-helper", "cargo_metadata", "filetime", + "parity-wasm", "sp-maybe-compressed-blob", "strum", "tempfile", - "toml 0.7.3", + "toml 0.7.4", "walkdir", "wasm-opt", ] @@ -11913,9 +12409,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.15" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" dependencies = [ "proc-macro2", 
"quote", @@ -11936,9 +12432,9 @@ dependencies = [ [[package]] name = "system-configuration" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75182f12f490e953596550b65ee31bda7c8e043d9386174b353bda50838c3fd" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags", "core-foundation", @@ -11980,15 +12476,16 @@ checksum = "fd1ba337640d60c3e96bc6f0638a939b9c9a7f2c316a1598c279828b3d1dc8c5" [[package]] name = "tempfile" -version = "3.5.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" +checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" dependencies = [ + "autocfg", "cfg-if", "fastrand", "redox_syscall 0.3.5", "rustix 0.37.19", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -12029,7 +12526,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -12117,7 +12614,7 @@ dependencies = [ "pbkdf2 0.11.0", "rand 0.8.5", "rustc-hash", - "sha2 0.10.6", + "sha2 0.10.7", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -12160,9 +12657,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.0" +version = "1.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" +checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" dependencies = [ "autocfg", "bytes", @@ -12185,7 +12682,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -12210,6 +12707,16 @@ dependencies = [ "webpki 0.22.0", ] 
+[[package]] +name = "tokio-rustls" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" +dependencies = [ + "rustls 0.21.1", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.14" @@ -12261,9 +12768,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b403acf6f2bb0859c93c7f0d967cb4a75a7ac552100f9322faf64dc047669b21" +checksum = "d6135d499e69981f9ff0ef2167955a5333c35e36f6937d382974566b3d5b94ec" dependencies = [ "serde", "serde_spanned", @@ -12273,18 +12780,18 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.8" +version = "0.19.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" +checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739" dependencies = [ "indexmap", "serde", @@ -12355,14 +12862,14 @@ checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -12408,7 +12915,7 @@ dependencies = [ "ansi_term", "chrono", "lazy_static", - "matchers 0.0.1", + 
"matchers", "parking_lot 0.11.2", "regex", "serde", @@ -12422,24 +12929,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "tracing-subscriber" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" -dependencies = [ - "matchers 0.1.0", - "nu-ansi-term", - "once_cell", - "regex", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", -] - [[package]] name = "trie-bench" version = "0.37.0" @@ -12546,16 +13035,16 @@ version = "0.10.0-dev" dependencies = [ "assert_cmd", "async-trait", - "clap 4.2.7", + "clap 4.3.2", "frame-remote-externalities", "frame-try-runtime", "hex", "log", + "node-primitives", "parity-scale-codec", "regex", "sc-cli", "sc-executor", - "sc-service", "serde", "serde_json", "sp-api", @@ -12576,6 +13065,7 @@ dependencies = [ "sp-weights", "substrate-cli-test-utils", "substrate-rpc-client", + "tempfile", "tokio", "zstd 0.12.3+zstd.1.5.2", ] @@ -12614,7 +13104,7 @@ dependencies = [ "log", "md-5", "rand 0.8.5", - "ring", + "ring 0.16.20", "stun", "thiserror", "tokio", @@ -12628,7 +13118,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", - "digest 0.10.6", + "digest 0.10.7", "rand 0.8.5", "static_assertions", ] @@ -12665,9 +13155,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" [[package]] name = "unicode-normalization" @@ -12702,9 +13192,9 @@ dependencies = [ [[package]] name = "universal-hash" -version = "0.5.0" +version = "0.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ "crypto-common", "subtle", @@ -12730,12 +13220,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.3.0", + "idna 0.4.0", "percent-encoding", ] @@ -12747,9 +13237,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" +checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" dependencies = [ "getrandom 0.2.9", ] @@ -12792,11 +13282,11 @@ dependencies = [ "ark-serialize-derive", "arrayref", "constcat", - "digest 0.10.6", + "digest 0.10.7", "rand 0.8.5", "rand_chacha 0.3.1", "rand_core 0.6.4", - "sha2 0.10.6", + "sha2 0.10.7", "sha3", "thiserror", "zeroize", @@ -12866,9 +13356,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.85" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b6cb788c4e39112fbe1822277ef6fb3c55cd86b95cb3d3c4c1c9597e4ac74b4" +checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -12876,24 +13366,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.85" +version = "0.2.86" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e522ed4105a9d626d885b35d62501b30d9666283a5c8be12c14a8bdafe7822" +checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.35" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "083abe15c5d88556b77bdf7aef403625be9e327ad37c62c4e4129af740168163" +checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" dependencies = [ "cfg-if", "js-sys", @@ -12903,9 +13393,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.85" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "358a79a0cb89d21db8120cbfb91392335913e4890665b1a7981d9e956903b434" +checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -12913,28 +13403,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.85" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4783ce29f09b9d93134d41297aded3a712b7b979e9c6f28c32cb88c973a94869" +checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.85" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a901d592cafaa4d711bc324edfaff879ac700b19c3dfd60058d2b445be2691eb" +checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" [[package]] name = "wasm-encoder" -version = "0.26.0" +version = "0.29.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05d0b6fcd0aeb98adf16e7975331b3c17222aa815148f5b976370ce589d80ef" +checksum = "18c41dbd92eaebf3612a39be316540b8377c871cb9bde6b064af962984912881" dependencies = [ "leb128", ] @@ -12959,9 +13449,9 @@ dependencies = [ [[package]] name = "wasm-opt" -version = "0.112.0" +version = "0.114.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fef6d0d508f08334e0ab0e6877feb4c0ecb3956bcf2cb950699b22fedf3e9c" +checksum = "d984c9ca0fd8dc99c85920c73d1707d0c2104b5cb8f368fce73b3dbf4424b22b" dependencies = [ "anyhow", "libc", @@ -12975,9 +13465,9 @@ dependencies = [ [[package]] name = "wasm-opt-cxx-sys" -version = "0.112.0" +version = "0.114.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc816bbc1596c8f2e8127e137a760c798023ef3d378f2ae51f0f1840e2dfa445" +checksum = "e754ce2f058a43fa604c588d111cfdc963131ad66d9f96c061d76a4f1a4a4eb0" dependencies = [ "anyhow", "cxx", @@ -12987,9 +13477,9 @@ dependencies = [ [[package]] name = "wasm-opt-sys" -version = "0.112.0" +version = "0.114.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40199e4f68ef1071b3c6d0bd8026a12b481865d4b9e49c156932ea9a6234dd14" +checksum = "b7283687ca12943aa186bba3d2ec43e87039098450c4701420eabd0a770e9b69" dependencies = [ "anyhow", "cc", @@ -13014,56 +13504,24 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c326c93fbf86419608361a2c925a31754cf109da1b8b55737070b4d6669422" -dependencies = [ - "parity-wasm", - "wasmi-validation", - "wasmi_core 0.2.1", -] - -[[package]] -name = "wasmi" -version = "0.28.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e61a7006b0fdf24f6bbe8dcfdad5ca1b350de80061fb2827f31c82fbbb9565a" +checksum = "e51fb5c61993e71158abf5bb863df2674ca3ec39ed6471c64f07aeaf751d67b4" dependencies = [ + 
"intx", + "smallvec", "spin 0.9.8", "wasmi_arena", - "wasmi_core 0.12.0", + "wasmi_core", "wasmparser-nostd", ] -[[package]] -name = "wasmi-validation" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ff416ad1ff0c42e5a926ed5d5fab74c0f098749aa0ad8b2a34b982ce0e867b" -dependencies = [ - "parity-wasm", -] - [[package]] name = "wasmi_arena" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "401c1f35e413fac1846d4843745589d9ec678977ab35a384db8ae7830525d468" -[[package]] -name = "wasmi_core" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d20cb3c59b788653d99541c646c561c9dd26506f25c0cebfe810659c54c6d7" -dependencies = [ - "downcast-rs", - "libm 0.2.6", - "memory_units", - "num-rational", - "num-traits", - "region", -] - [[package]] name = "wasmi_core" version = "0.12.0" @@ -13071,16 +13529,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624e6333e861ef49095d2d678b76ebf30b06bf37effca845be7e5b87c90071b7" dependencies = [ "downcast-rs", - "libm 0.2.6", + "libm 0.2.7", "num-traits", "paste", ] [[package]] name = "wasmparser" -version = "0.100.0" +version = "0.102.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64b20236ab624147dfbb62cf12a19aaf66af0e41b8398838b66e997d07d269d4" +checksum = "48134de3d7598219ab9eaf6b91b15d8e50d31da76b8519fe4ecfcec2cf35104b" dependencies = [ "indexmap", "url", @@ -13097,9 +13555,9 @@ dependencies = [ [[package]] name = "wasmtime" -version = "6.0.2" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76a222f5fa1e14b2cefc286f1b68494d7a965f4bf57ec04c59bb62673d639af6" +checksum = "f907fdead3153cb9bfb7a93bbd5b62629472dc06dee83605358c64c52ed3dda9" dependencies = [ "anyhow", "bincode", @@ -13107,7 +13565,7 @@ dependencies = [ "indexmap", "libc", "log", - "object 0.29.0", + "object", "once_cell", 
"paste", "psm", @@ -13120,43 +13578,43 @@ dependencies = [ "wasmtime-environ", "wasmtime-jit", "wasmtime-runtime", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] name = "wasmtime-asm-macros" -version = "6.0.2" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4407a7246e7d2f3d8fb1cf0c72fda8dbafdb6dd34d555ae8bea0e5ae031089cc" +checksum = "d3b9daa7c14cd4fa3edbf69de994408d5f4b7b0959ac13fa69d465f6597f810d" dependencies = [ "cfg-if", ] [[package]] name = "wasmtime-cache" -version = "6.0.2" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ceb3adf61d654be0be67fffdce42447b0880481348785be5fe40b5dd7663a4c" +checksum = "c86437fa68626fe896e5afc69234bb2b5894949083586535f200385adfd71213" dependencies = [ "anyhow", - "base64 0.13.1", + "base64 0.21.2", "bincode", "directories-next", "file-per-thread-logger", "log", - "rustix 0.36.13", + "rustix 0.36.14", "serde", - "sha2 0.10.6", + "sha2 0.10.7", "toml 0.5.11", - "windows-sys 0.42.0", + "windows-sys 0.45.0", "zstd 0.11.2+zstd.1.5.2", ] [[package]] name = "wasmtime-cranelift" -version = "6.0.2" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c366bb8647e01fd08cb5589976284b00abfded5529b33d7e7f3f086c68304a4" +checksum = "b1cefde0cce8cb700b1b21b6298a3837dba46521affd7b8c38a9ee2c869eee04" dependencies = [ "anyhow", "cranelift-codegen", @@ -13164,27 +13622,43 @@ dependencies = [ "cranelift-frontend", "cranelift-native", "cranelift-wasm", - "gimli 0.26.2", + "gimli", "log", - "object 0.29.0", + "object", "target-lexicon", "thiserror", "wasmparser", + "wasmtime-cranelift-shared", + "wasmtime-environ", +] + +[[package]] +name = "wasmtime-cranelift-shared" +version = "8.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd041e382ef5aea1b9fc78442394f1a4f6d676ce457e7076ca4cb3f397882f8b" +dependencies = [ + "anyhow", + "cranelift-codegen", + 
"cranelift-native", + "gimli", + "object", + "target-lexicon", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "6.0.2" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47b8b50962eae38ee319f7b24900b7cf371f03eebdc17400c1dc8575fc10c9a7" +checksum = "a990198cee4197423045235bf89d3359e69bd2ea031005f4c2d901125955c949" dependencies = [ "anyhow", "cranelift-entity", - "gimli 0.26.2", + "gimli", "indexmap", "log", - "object 0.29.0", + "object", "serde", "target-lexicon", "thiserror", @@ -13194,18 +13668,18 @@ dependencies = [ [[package]] name = "wasmtime-jit" -version = "6.0.2" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffaed4f9a234ba5225d8e64eac7b4a5d13b994aeb37353cde2cbeb3febda9eaa" +checksum = "0de48df552cfca1c9b750002d3e07b45772dd033b0b206d5c0968496abf31244" dependencies = [ - "addr2line 0.17.0", + "addr2line", "anyhow", "bincode", "cfg-if", "cpp_demangle", - "gimli 0.26.2", + "gimli", "log", - "object 0.29.0", + "object", "rustc-demangle", "serde", "target-lexicon", @@ -13213,36 +13687,36 @@ dependencies = [ "wasmtime-jit-debug", "wasmtime-jit-icache-coherence", "wasmtime-runtime", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] name = "wasmtime-jit-debug" -version = "6.0.2" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eed41cbcbf74ce3ff6f1d07d1b707888166dc408d1a880f651268f4f7c9194b2" +checksum = "6e0554b84c15a27d76281d06838aed94e13a77d7bf604bbbaf548aa20eb93846" dependencies = [ - "object 0.29.0", + "object", "once_cell", - "rustix 0.36.13", + "rustix 0.36.14", ] [[package]] name = "wasmtime-jit-icache-coherence" -version = "6.0.2" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a28ae1e648461bfdbb79db3efdaee1bca5b940872e4175390f465593a2e54c" +checksum = "aecae978b13f7f67efb23bd827373ace4578f2137ec110bbf6a4a7cde4121bbd" dependencies = 
[ "cfg-if", "libc", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] name = "wasmtime-runtime" -version = "6.0.2" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e704b126e4252788ccfc3526d4d4511d4b23c521bf123e447ac726c14545217b" +checksum = "658cf6f325232b6760e202e5255d823da5e348fdea827eff0a2a22319000b441" dependencies = [ "anyhow", "cc", @@ -13252,21 +13726,21 @@ dependencies = [ "log", "mach", "memfd", - "memoffset 0.6.5", + "memoffset 0.8.0", "paste", "rand 0.8.5", - "rustix 0.36.13", + "rustix 0.36.14", "wasmtime-asm-macros", "wasmtime-environ", "wasmtime-jit-debug", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] name = "wasmtime-types" -version = "6.0.2" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83e5572c5727c1ee7e8f28717aaa8400e4d22dcbd714ea5457d85b5005206568" +checksum = "a4f6fffd2a1011887d57f07654dd112791e872e3ff4a2e626aee8059ee17f06f" dependencies = [ "cranelift-entity", "serde", @@ -13276,9 +13750,9 @@ dependencies = [ [[package]] name = "wast" -version = "57.0.0" +version = "60.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eb0f5ed17ac4421193c7477da05892c2edafd67f9639e3c11a82086416662dc" +checksum = "bd06cc744b536e30387e72a48fdd492105b9c938bb4f415c39c616a7a0a697ad" dependencies = [ "leb128", "memchr", @@ -13288,18 +13762,18 @@ dependencies = [ [[package]] name = "wat" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab9ab0d87337c3be2bb6fc5cd331c4ba9fd6bcb4ee85048a0dd59ed9ecf92e53" +checksum = "5abe520f0ab205366e9ac7d3e6b2fc71de44e32a2b58f2ec871b6b575bdcea3b" dependencies = [ "wast", ] [[package]] name = "web-sys" -version = "0.3.62" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b5f940c7edfdc6d12126d98c9ef4d1b3d470011c47c76a6581df47ad9ba721" +checksum = 
"3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" dependencies = [ "js-sys", "wasm-bindgen", @@ -13311,7 +13785,7 @@ version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -13321,7 +13795,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -13350,14 +13824,14 @@ dependencies = [ "rand 0.8.5", "rcgen 0.9.3", "regex", - "ring", + "ring 0.16.20", "rtcp", "rtp", "rustls 0.19.1", "sdp", "serde", "serde_json", - "sha2 0.10.6", + "sha2 0.10.7", "stun", "thiserror", "time 0.3.21", @@ -13397,7 +13871,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" dependencies = [ "aes 0.6.0", - "aes-gcm 0.10.1", + "aes-gcm 0.10.2", "async-trait", "bincode", "block-modes", @@ -13415,12 +13889,12 @@ dependencies = [ "rand 0.8.5", "rand_core 0.6.4", "rcgen 0.9.3", - "ring", + "ring 0.16.20", "rustls 0.19.1", "sec1 0.3.0", "serde", "sha1", - "sha2 0.10.6", + "sha2 0.10.7", "signature 1.6.4", "subtle", "thiserror", @@ -13556,9 +14030,9 @@ dependencies = [ [[package]] name = "wide" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b689b6c49d6549434bf944e6b0f39238cf63693cb7a147e9d887507fffa3b223" +checksum = "5cd0496a71f3cc6bc4bf0ed91346426a5099e93d89807e663162dc5a1069ff65" dependencies = [ "bytemuck", "safe_arch", @@ -13623,21 +14097,6 @@ dependencies = [ "windows-targets 0.48.0", ] -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - [[package]] name = "windows-sys" version = "0.45.0" @@ -13862,7 +14321,7 @@ dependencies = [ "lazy_static", "nom", "oid-registry 0.4.0", - "ring", + "ring 0.16.20", "rusticata-macros", "thiserror", "time 0.3.21", @@ -13941,7 +14400,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 2b1659d4fb37b..9ee8142e23e76 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,3 +1,45 @@ +[package] +name = "substrate" +description = "Next-generation framework for blockchain innovation" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +authors = ["Parity Technologies "] +edition = "2021" +version = "0.0.0" + +# This list of dependencies is for documentation purposes only. 
+[dependencies] +aquamarine = "0.3.2" + +subkey = { path = "bin/utils/subkey" } +chain-spec-builder = { path = "bin/utils/chain-spec-builder" } + +sc-service = { path = "client/service" } +sc-cli = { path = "client/cli" } +sc-consensus-aura = { path = "client/consensus/aura" } +sc-consensus-babe = { path = "client/consensus/babe" } +sc-consensus-grandpa = { path = "client/consensus/grandpa" } +sc-consensus-beefy = { path = "client/consensus/beefy" } +sc-consensus-manual-seal = { path = "client/consensus/manual-seal" } +sc-consensus-pow = { path = "client/consensus/pow" } + +sp-runtime = { path = "primitives/runtime" } +frame-support = { path = "frame/support" } + +node-cli = { path = "bin/node/cli" } + +# Exists here to be backwards compatible and to support `cargo run` in the workspace. +# +# Just uses the `node-cli` main binary. `node-cli` itself also again exposes the node as +# `substrate-node`. + +# `cargo run` on its own doesn't support features. To use features you must explicitly use +# `node-cli` in your command, e.g. `cargo run -p node-cli --features try-runtime ...`. 
+[[bin]] +name = "substrate" +path = "bin/node/cli/bin/main.rs" + [workspace] resolver = "2" @@ -38,7 +80,6 @@ members = [ "client/executor", "client/executor/common", "client/executor/runtime-test", - "client/executor/wasmi", "client/executor/wasmtime", "client/informant", "client/keystore", @@ -54,7 +95,6 @@ members = [ "client/network/sync", "client/network/test", "client/offchain", - "client/peerset", "client/allocator", "client/proposer-metrics", "client/rpc", @@ -75,6 +115,7 @@ members = [ "client/transaction-pool/api", "client/utils", "frame/alliance", + "frame/asset-conversion", "frame/assets", "frame/atomic-swap", "frame/aura", @@ -107,9 +148,13 @@ members = [ "frame/election-provider-support/benchmarking", "frame/election-provider-support/solution-type", "frame/election-provider-support/solution-type/fuzzer", + "frame/examples", "frame/examples/basic", "frame/examples/offchain-worker", + "frame/examples/kitchensink", "frame/examples/dev-mode", + "frame/examples/split", + "frame/examples/default-config", "frame/executive", "frame/nis", "frame/grandpa", @@ -129,11 +174,14 @@ members = [ "frame/message-queue", "frame/nfts", "frame/nfts/runtime-api", + "frame/nft-fractionalization", "frame/nomination-pools", "frame/nomination-pools/fuzzer", "frame/nomination-pools/benchmarking", "frame/nomination-pools/test-staking", "frame/nomination-pools/runtime-api", + "frame/paged-list", + "frame/paged-list/fuzzer", "frame/insecure-randomness-collective-flip", "frame/ranked-collective", "frame/recovery", @@ -166,6 +214,7 @@ members = [ "frame/system/rpc/runtime-api", "frame/timestamp", "frame/transaction-payment", + "frame/transaction-payment/asset-conversion-tx-payment", "frame/transaction-payment/asset-tx-payment", "frame/transaction-payment/rpc", "frame/transaction-payment/rpc/runtime-api", @@ -198,9 +247,11 @@ members = [ "primitives/core", "primitives/core/hashing", "primitives/core/hashing/proc-macro", + "primitives/crypto/ec-utils", "primitives/database", 
"primitives/debug-derive", "primitives/externalities", + "primitives/genesis-builder", "primitives/inherents", "primitives/io", "primitives/keyring", @@ -237,6 +288,7 @@ members = [ "primitives/weights", "scripts/ci/node-template-release", "test-utils", + "test-utils/cli", "test-utils/client", "test-utils/derive", "test-utils/runtime", diff --git a/README.md b/README.md index 361f410ab2790..b6b5d0ccc9c41 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Substrate · [![GitHub license](https://img.shields.io/badge/license-GPL3%2FApache2-blue)](#LICENSE) [![GitLab Status](https://gitlab.parity.io/parity/substrate/badges/master/pipeline.svg)](https://gitlab.parity.io/parity/substrate/pipelines) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](docs/CONTRIBUTING.adoc) [![Stack Exchange](https://img.shields.io/badge/Substrate-Community%20&%20Support-24CC85?logo=stackexchange)](https://substrate.stackexchange.com/) +# Substrate · [![GitHub license](https://img.shields.io/badge/license-GPL3%2FApache2-blue)](#LICENSE) [![GitLab Status](https://gitlab.parity.io/parity/substrate/badges/master/pipeline.svg)](https://gitlab.parity.io/parity/substrate/pipelines) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](docs/CONTRIBUTING.md) [![Stack Exchange](https://img.shields.io/badge/Substrate-Community%20&%20Support-24CC85?logo=stackexchange)](https://substrate.stackexchange.com/)

@@ -18,7 +18,7 @@ Please do report bugs and [issues here](https://github.com/paritytech/substrate/ ## Contributions & Code of Conduct -Please follow the contributions guidelines as outlined in [`docs/CONTRIBUTING.adoc`](docs/CONTRIBUTING.adoc). +Please follow the contributions guidelines as outlined in [`docs/CONTRIBUTING.md`](docs/CONTRIBUTING.md). In all communications and contributions, this project follows the [Contributor Covenant Code of Conduct](docs/CODE_OF_CONDUCT.md). ## Security @@ -33,4 +33,3 @@ The security policy and procedures can be found in [`docs/SECURITY.md`](docs/SEC The reason for the split-licensing is to ensure that for the vast majority of teams using Substrate to create feature-chains, then all changes can be made entirely in Apache2-licensed code, allowing teams full freedom over what and how they release and giving licensing clarity to commercial teams. In the interests of the community, we require any deeper improvements made to Substrate's core logic (e.g. Substrate's internal consensus, crypto or database code) to be contributed back so everyone can benefit. - diff --git a/bin/node-template/README.md b/bin/node-template/README.md index 0dd4ee5077b85..337facaaf0898 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -90,7 +90,7 @@ You can also find the source code and instructions for hosting your own instance ### Multi-Node Local Testnet -If you want to see the multi-node consensus algorithm in action, see [Simulate a network](https://docs.substrate.io/tutorials/get-started/simulate-network/). +If you want to see the multi-node consensus algorithm in action, see [Simulate a network](https://docs.substrate.io/tutorials/build-a-blockchain/simulate-network/). ## Template Structure @@ -125,7 +125,7 @@ Take special note of the following: In Substrate, the terms "runtime" and "state transition function" are analogous. 
Both terms refer to the core logic of the blockchain that is responsible for validating blocks and executing the state changes they define. -The Substrate project in this repository uses [FRAME](https://docs.substrate.io/fundamentals/runtime-development/#frame) to construct a blockchain runtime. +The Substrate project in this repository uses [FRAME](https://docs.substrate.io/learn/runtime-development/#frame) to construct a blockchain runtime. FRAME allows runtime developers to declare domain-specific logic in modules called "pallets". At the heart of FRAME is a helpful [macro language](https://docs.substrate.io/reference/frame-macros/) that makes it easy to create pallets and flexibly compose them to create blockchains that can address [a variety of needs](https://substrate.io/ecosystem/projects/). @@ -133,19 +133,20 @@ Review the [FRAME runtime implementation](./runtime/src/lib.rs) included in this - This file configures several pallets to include in the runtime. Each pallet configuration is defined by a code block that begins with `impl $PALLET_NAME::Config for Runtime`. -- The pallets are composed into a single runtime by way of the [`construct_runtime!`](https://crates.parity.io/frame_support/macro.construct_runtime.html) macro, which is part of the core FRAME Support [system](https://docs.substrate.io/reference/frame-pallets/#system-pallets) library. +- The pallets are composed into a single runtime by way of the [`construct_runtime!`](https://paritytech.github.io/substrate/master/frame_support/macro.construct_runtime.html) macro, which is part of the [core FRAME pallet library](https://docs.substrate.io/reference/frame-pallets/#system-pallets). ### Pallets -The runtime in this project is constructed using many FRAME pallets that ship with the [core Substrate repository](https://github.com/paritytech/substrate/tree/master/frame) and a template pallet that is [defined in the `pallets`](./pallets/template/src/lib.rs) directory. 
+The runtime in this project is constructed using many FRAME pallets that ship with [the Substrate repository](https://github.com/paritytech/substrate/tree/master/frame) and a template pallet that is [defined in the `pallets`](./pallets/template/src/lib.rs) directory. -A FRAME pallet is compromised of a number of blockchain primitives: +A FRAME pallet is comprised of a number of blockchain primitives, including: - Storage: FRAME defines a rich set of powerful [storage abstractions](https://docs.substrate.io/build/runtime-storage/) that makes it easy to use Substrate's efficient key-value database to manage the evolving state of a blockchain. - Dispatchables: FRAME pallets define special types of functions that can be invoked (dispatched) from outside of the runtime in order to update its state. -- Events: Substrate uses [events and errors](https://docs.substrate.io/build/events-and-errors/) to notify users of important changes in the runtime. +- Events: Substrate uses [events](https://docs.substrate.io/build/events-and-errors/) to notify users of significant state changes. - Errors: When a dispatchable fails, it returns an error. -- Config: The `Config` configuration interface is used to define the types and parameters upon which a FRAME pallet depends. + +Each pallet has its own `Config` trait which serves as a configuration interface to generically define the types and parameters it depends on. 
## Alternatives Installations diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 1801caad676ab..b05fed96bc8a2 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -21,33 +21,30 @@ clap = { version = "4.2.5", features = ["derive"] } futures = { version = "0.3.21", features = ["thread-pool"]} sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } sc-network = { version = "0.10.0-dev", path = "../../../client/network" } sc-service = { version = "0.10.0-dev", path = "../../../client/service" } sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } -sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } -sc-statement-store = { version = "4.0.0-dev", path = "../../../client/statement-store" } +sc-offchain = { version = "4.0.0-dev", path = "../../../client/offchain" } sc-consensus-aura = { version = "0.10.0-dev", path = "../../../client/consensus/aura" } sp-consensus-aura = { version = "0.10.0-dev", path = "../../../primitives/consensus/aura" } -sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-consensus-grandpa = { version = "0.10.0-dev", path = "../../../client/consensus/grandpa" } sp-consensus-grandpa = { version = "4.0.0-dev", path = "../../../primitives/consensus/grandpa" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } 
-sp-io = { version = "7.0.0", path = "../../../primitives/io" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +sp-io = { version = "23.0.0", path = "../../../primitives/io" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } +sp-keyring = { version = "24.0.0", path = "../../../primitives/keyring" } frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } # These dependencies are used for the node template's RPCs jsonrpsee = { version = "0.16.2", features = ["server"] } -sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../client/rpc-api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } @@ -76,7 +73,16 @@ runtime-benchmarks = [ "node-template-runtime/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-benchmarking-cli/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sc-service/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" ] # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. 
-try-runtime = ["node-template-runtime/try-runtime", "try-runtime-cli/try-runtime"] +try-runtime = [ + "node-template-runtime/try-runtime", + "try-runtime-cli/try-runtime", + "frame-system/try-runtime", + "pallet-transaction-payment/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/bin/node-template/node/src/benchmarking.rs b/bin/node-template/node/src/benchmarking.rs index 37e0e465969de..6e29ad1a12311 100644 --- a/bin/node-template/node/src/benchmarking.rs +++ b/bin/node-template/node/src/benchmarking.rs @@ -82,11 +82,8 @@ impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder { let extrinsic: OpaqueExtrinsic = create_benchmark_extrinsic( self.client.as_ref(), acc, - BalancesCall::transfer_keep_alive { - dest: self.dest.clone().into(), - value: self.value.into(), - } - .into(), + BalancesCall::transfer_keep_alive { dest: self.dest.clone().into(), value: self.value } + .into(), nonce, ) .into(); @@ -143,10 +140,10 @@ pub fn create_benchmark_extrinsic( let signature = raw_payload.using_encoded(|e| sender.sign(e)); runtime::UncheckedExtrinsic::new_signed( - call.clone(), + call, sp_runtime::AccountId32::from(sender.public()).into(), - runtime::Signature::Sr25519(signature.clone()), - extra.clone(), + runtime::Signature::Sr25519(signature), + extra, ) } diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs index e978596beb7b1..2cd2d07293026 100644 --- a/bin/node-template/node/src/chain_spec.rs +++ b/bin/node-template/node/src/chain_spec.rs @@ -1,6 +1,6 @@ use node_template_runtime::{ - AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig, Signature, SudoConfig, - SystemConfig, WASM_BINARY, + AccountId, AuraConfig, BalancesConfig, GrandpaConfig, RuntimeGenesisConfig, Signature, + SudoConfig, SystemConfig, WASM_BINARY, }; use sc_service::ChainType; use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -12,7 +12,7 @@ use sp_runtime::traits::{IdentifyAccount, Verify}; // const 
STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. -pub type ChainSpec = sc_service::GenericChainSpec; +pub type ChainSpec = sc_service::GenericChainSpec; /// Generate a crypto pair from seed. pub fn get_from_seed(seed: &str) -> ::Public { @@ -131,11 +131,12 @@ fn testnet_genesis( root_key: AccountId, endowed_accounts: Vec, _enable_println: bool, -) -> GenesisConfig { - GenesisConfig { +) -> RuntimeGenesisConfig { + RuntimeGenesisConfig { system: SystemConfig { // Add Wasm runtime to storage. code: wasm_binary.to_vec(), + ..Default::default() }, balances: BalancesConfig { // Configure endowed accounts with initial balance of 1 << 60. @@ -146,6 +147,7 @@ fn testnet_genesis( }, grandpa: GrandpaConfig { authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), + ..Default::default() }, sudo: SudoConfig { // Assign network admin rights. diff --git a/bin/node-template/node/src/cli.rs b/bin/node-template/node/src/cli.rs index dd610477ac469..98037eb886a8e 100644 --- a/bin/node-template/node/src/cli.rs +++ b/bin/node-template/node/src/cli.rs @@ -10,6 +10,7 @@ pub struct Cli { } #[derive(Debug, clap::Subcommand)] +#[allow(clippy::large_enum_variant)] pub enum Subcommand { /// Key management cli utilities #[command(subcommand)] Key(sc_cli::KeySubcommand), @@ -40,12 +41,9 @@ pub enum Subcommand { #[command(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), - /// Try some command against runtime state. - #[cfg(feature = "try-runtime")] - TryRuntime(try_runtime_cli::TryRuntimeCmd), - - /// Try some command against runtime state. Note: `try-runtime` feature must be enabled. - #[cfg(not(feature = "try-runtime"))] + /// Try-runtime has migrated to a standalone CLI + /// (). The subcommand exists as a stub and + /// deprecation notice. It will be removed entirely some time after January 2024. TryRuntime, /// Db meta columns information.
diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index e121db820f2a3..a25157693cd43 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -6,13 +6,10 @@ use crate::{ }; use frame_benchmarking_cli::{BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE}; use node_template_runtime::{Block, EXISTENTIAL_DEPOSIT}; -use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; +use sc_cli::SubstrateCli; use sc_service::PartialComponents; use sp_keyring::Sr25519Keyring; -#[cfg(feature = "try-runtime")] -use try_runtime_cli::block_building_info::timestamp_with_aura_info; - impl SubstrateCli for Cli { fn impl_name() -> String { "Substrate Node".into() @@ -46,10 +43,6 @@ impl SubstrateCli for Cli { Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), }) } - - fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { - &node_template_runtime::VERSION - } } /// Parse and run command line arguments @@ -124,7 +117,7 @@ pub fn run() -> sc_cli::Result<()> { ) } - cmd.run::(config) + cmd.run::(config) }, BenchmarkCmd::Block(cmd) => { let PartialComponents { client, .. } = service::new_partial(&config)?; @@ -176,28 +169,7 @@ pub fn run() -> sc_cli::Result<()> { }) }, #[cfg(feature = "try-runtime")] - Some(Subcommand::TryRuntime(cmd)) => { - use crate::service::ExecutorDispatch; - use sc_executor::{sp_wasm_interface::ExtendedHostFunctions, NativeExecutionDispatch}; - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - // we don't need any of the components of new_partial, just a runtime, or a task - // manager to do `async_run`. 
- let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - let task_manager = - sc_service::TaskManager::new(config.tokio_handle.clone(), registry) - .map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?; - let info_provider = timestamp_with_aura_info(6000); - - Ok(( - cmd.run::::ExtendHostFunctions, - >, _>(Some(info_provider)), - task_manager, - )) - }) - }, + Some(Subcommand::TryRuntime) => Err(try_runtime_cli::DEPRECATION_NOTICE.into()), #[cfg(not(feature = "try-runtime"))] Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \ You can enable it with `--features try-runtime`." diff --git a/bin/node-template/node/src/rpc.rs b/bin/node-template/node/src/rpc.rs index 981f375d0b462..f4f1540f732f7 100644 --- a/bin/node-template/node/src/rpc.rs +++ b/bin/node-template/node/src/rpc.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use jsonrpsee::RpcModule; -use node_template_runtime::{opaque::Block, AccountId, Balance, Index}; +use node_template_runtime::{opaque::Block, AccountId, Balance, Nonce}; use sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; @@ -34,7 +34,7 @@ where C: ProvideRuntimeApi, C: HeaderBackend + HeaderMetadata + 'static, C: Send + Sync + 'static, - C::Api: substrate_frame_rpc_system::AccountNonceApi, + C::Api: substrate_frame_rpc_system::AccountNonceApi, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BlockBuilder, P: TransactionPool + 'static, @@ -45,7 +45,7 @@ where let mut module = RpcModule::new(()); let FullDeps { client, pool, deny_unsafe } = deps; - module.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?; + module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; module.merge(TransactionPayment::new(client).into_rpc())?; // Extend this RPC with a custom API by using the following syntax. 
diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index ca827001b5bcc..7303f5cd6dd6d 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -1,12 +1,14 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. +use futures::FutureExt; use node_template_runtime::{self, opaque::Block, RuntimeApi}; -use sc_client_api::BlockBackend; +use sc_client_api::{Backend, BlockBackend}; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; use sc_consensus_grandpa::SharedVoterState; pub use sc_executor::NativeElseWasmExecutor; use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams}; use sc_telemetry::{Telemetry, TelemetryWorker}; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; use std::{sync::Arc, time::Duration}; @@ -35,6 +37,11 @@ pub(crate) type FullClient = type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; +/// The minimum period of blocks on which justifications will be +/// imported and generated. 
+const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512; + +#[allow(clippy::type_complexity)] pub fn new_partial( config: &Configuration, ) -> Result< @@ -42,7 +49,7 @@ pub fn new_partial( FullClient, FullBackend, FullSelectChain, - sc_consensus::DefaultImportQueue, + sc_consensus::DefaultImportQueue, sc_transaction_pool::FullPool, ( sc_consensus_grandpa::GrandpaBlockImport< @@ -68,8 +75,7 @@ pub fn new_partial( }) .transpose()?; - let executor = sc_service::new_native_or_wasm_executor(&config); - + let executor = sc_service::new_native_or_wasm_executor(config); let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( config, @@ -95,7 +101,8 @@ pub fn new_partial( let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import( client.clone(), - &(client.clone() as Arc<_>), + GRANDPA_JUSTIFICATION_PERIOD, + &client, select_chain.clone(), telemetry.as_ref().map(|x| x.handle()), )?; @@ -179,11 +186,23 @@ pub fn new_full(config: Configuration) -> Result { })?; if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - task_manager.spawn_handle(), - client.clone(), - network.clone(), + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-worker", + sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { + runtime_api_provider: client.clone(), + is_validator: config.role.is_authority(), + keystore: Some(keystore_container.keystore()), + offchain_db: backend.offchain_storage(), + transaction_pool: Some(OffchainTransactionPoolFactory::new( + transaction_pool.clone(), + )), + network_provider: network.clone(), + enable_http_requests: true, + custom_extensions: |_| vec![], + }) + .run(client.clone(), task_manager.spawn_handle()) + .boxed(), ); } @@ -224,7 +243,7 @@ pub fn new_full(config: Configuration) -> Result { let proposer_factory = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client.clone(), - transaction_pool, + transaction_pool.clone(), 
prometheus_registry.as_ref(), telemetry.as_ref().map(|x| x.handle()), ); @@ -276,7 +295,7 @@ pub fn new_full(config: Configuration) -> Result { let grandpa_config = sc_consensus_grandpa::Config { // FIXME #1578 make this available through chainspec gossip_duration: Duration::from_millis(333), - justification_period: 512, + justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD, name: Some(name), observer_enabled: false, keystore, @@ -300,6 +319,7 @@ pub fn new_full(config: Configuration) -> Result { prometheus_registry, shared_voter_state: SharedVoterState::empty(), telemetry: telemetry.as_ref().map(|x| x.handle()), + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool), }; // the GRANDPA voter task is considered infallible, i.e. diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index bb6d8c511af7e..f468374c2ff9f 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -13,7 +13,7 @@ repository = "https://github.com/substrate-developer-hub/substrate-node-template targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } @@ -22,9 +22,9 @@ frame-support = { version = "4.0.0-dev", default-features = false, path = "../.. 
frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../../frame/system" } [dev-dependencies] -sp-core = { version = "7.0.0", path = "../../../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../../../primitives/io" } -sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../../../primitives/io" } +sp-runtime = { version = "24.0.0", path = "../../../../primitives/runtime" } [features] default = ["std"] @@ -34,6 +34,18 @@ std = [ "frame-support/std", "frame-system/std", "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std" +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" ] -runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] -try-runtime = ["frame-support/try-runtime"] diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 9550d3d546cca..edf7769bab7d3 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -94,7 +94,7 @@ pub mod pallet { // Read a value from storage. match >::get() { // Return an error if the value has not been set. - None => return Err(Error::::NoneValue.into()), + None => Err(Error::::NoneValue.into()), Some(old) => { // Increment the value read from storage; will error in the event of overflow. 
let new = old.checked_add(1).ok_or(Error::::StorageOverflow)?; diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index b4d6905378a5d..244ae1b37859b 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -2,19 +2,15 @@ use crate as pallet_template; use frame_support::traits::{ConstU16, ConstU64}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; // Configure a mock runtime to test the pallet. frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, TemplateModule: pallet_template, @@ -28,13 +24,12 @@ impl frame_system::Config for Test { type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -55,5 +50,5 @@ impl pallet_template::Config for Test { // Build genesis storage according to the mock runtime. 
pub fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default().build_storage::().unwrap().into() + frame_system::GenesisConfig::::default().build_storage().unwrap().into() } diff --git a/bin/node-template/pallets/template/src/weights.rs b/bin/node-template/pallets/template/src/weights.rs index e8fbc09bad8e9..7c42936e09f29 100644 --- a/bin/node-template/pallets/template/src/weights.rs +++ b/bin/node-template/pallets/template/src/weights.rs @@ -19,7 +19,6 @@ // * // --steps=50 // --repeat=20 -// --execution=wasm // --wasm-execution=compiled // --output // pallets/template/src/weights.rs diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index d7f7ca4d86a8c..51e30187c9d63 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -13,7 +13,7 @@ repository = "https://github.com/substrate-developer-hub/substrate-node-template targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } pallet-aura = { version = "4.0.0-dev", default-features = false, path = "../../../frame/aura" } @@ -30,14 +30,14 @@ sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../pri sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/block-builder"} sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/consensus/aura" } sp-consensus-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/consensus/grandpa" } -sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } +sp-core = { version = "21.0.0", default-features = 
false, path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/inherents"} sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "5.0.0", default-features = false, path = "../../../primitives/version" } +sp-version = { version = "22.0.0", default-features = false, path = "../../../primitives/version" } # Used for the node template's RPCs frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } @@ -112,4 +112,6 @@ try-runtime = [ "pallet-template/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", + "sp-runtime/try-runtime" ] +experimental = ["pallet-aura/experimental"] diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 49346fbdd77da..c3375d2ee601a 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -27,7 +27,8 @@ use sp_version::RuntimeVersion; pub use frame_support::{ construct_runtime, parameter_types, traits::{ - ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem, Randomness, StorageInfo, + ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem, Randomness, + StorageInfo, }, weights::{ 
constants::{ @@ -62,7 +63,7 @@ pub type AccountId = <::Signer as IdentifyAccount>::Account pub type Balance = u128; /// Index of a transaction in the chain. -pub type Index = u32; +pub type Nonce = u32; /// A hash of some data used by the chain. pub type Hash = sp_core::H256; @@ -154,6 +155,8 @@ parameter_types! { impl frame_system::Config for Runtime { /// The basic call filter to use in dispatchable. type BaseCallFilter = frame_support::traits::Everything; + /// The block type for the runtime. + type Block = Block; /// Block & extrinsics weights: base values and limits. type BlockWeights = BlockWeights; /// The maximum length of a block (in bytes). @@ -164,16 +167,12 @@ impl frame_system::Config for Runtime { type RuntimeCall = RuntimeCall; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. type Lookup = AccountIdLookup; - /// The index type for storing how many extrinsics an account has signed. - type Index = Index; - /// The index type for blocks. - type BlockNumber = BlockNumber; + /// The type for storing how many extrinsics an account has signed. + type Nonce = Nonce; /// The type for hashing blocks and tries. type Hash = Hash; /// The hashing algorithm used. type Hashing = BlakeTwo256; - /// The header type. - type Header = generic::Header; /// The ubiquitous event type. type RuntimeEvent = RuntimeEvent; /// The ubiquitous origin type. 
@@ -207,6 +206,10 @@ impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; type DisabledValidators = (); type MaxAuthorities = ConstU32<32>; + type AllowMultipleBlocksPerSlot = ConstBool; + + #[cfg(feature = "experimental")] + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; } impl pallet_grandpa::Config for Runtime { @@ -214,6 +217,7 @@ impl pallet_grandpa::Config for Runtime { type WeightInfo = (); type MaxAuthorities = ConstU32<32>; + type MaxNominators = ConstU32<0>; type MaxSetIdSessionEntries = ConstU64<0>; type KeyOwnerProof = sp_core::Void; @@ -245,7 +249,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = pallet_balances::weights::SubstrateWeight; type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -276,12 +280,7 @@ impl pallet_template::Config for Runtime { // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( - pub struct Runtime - where - Block = Block, - NodeBlock = opaque::Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { + pub struct Runtime { System: frame_system, Timestamp: pallet_timestamp, Aura: pallet_aura, @@ -460,8 +459,8 @@ impl_runtime_apis! 
{ } } - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Index { + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { System::account_nonce(account) } } diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index b3f1434a9c27b..7703f8ed2e4e0 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -12,27 +12,27 @@ publish = false # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -array-bytes = "4.1" +array-bytes = "6.1" clap = { version = "4.2.5", features = ["derive"] } log = "0.4.17" node-primitives = { version = "2.0.0", path = "../primitives" } node-testing = { version = "3.0.0-dev", path = "../testing" } kitchensink-runtime = { version = "3.0.0-dev", path = "../runtime" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api/" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.13.0", path = "../../../primitives/state-machine" } -serde = "1.0.136" +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.28.0", path = "../../../primitives/state-machine" } +serde = "1.0.163" serde_json = "1.0.85" derive_more = { version = "0.99.17", default-features = false, features = ["display"] } kvdb = "0.13.0" kvdb-rocksdb = "0.19.0" -sp-trie = { version = "7.0.0", path = "../../../primitives/trie" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-trie = { version = "22.0.0", path = "../../../primitives/trie" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sc-basic-authorship = { version = "0.10.0-dev", path = "../../../client/basic-authorship" } sp-inherents = { version = "4.0.0-dev", path = 
"../../../primitives/inherents" } sp-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/timestamp" } -sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } hash-db = "0.16.0" tempfile = "3.1.0" fs_extra = "1" diff --git a/bin/node/bench/src/construct.rs b/bin/node/bench/src/construct.rs index ec2a829f692a6..4f3ca07f86b9d 100644 --- a/bin/node/bench/src/construct.rs +++ b/bin/node/bench/src/construct.rs @@ -28,7 +28,7 @@ use futures::Future; use std::{borrow::Cow, collections::HashMap, pin::Pin, sync::Arc}; use node_primitives::Block; -use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile}; +use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes}; use sc_transaction_pool_api::{ ImportNotificationStream, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, TransactionSource, TransactionStatusStreamFor, TxHash, @@ -43,7 +43,6 @@ use crate::{ }; pub struct ConstructionBenchmarkDescription { - pub profile: Profile, pub key_types: KeyTypes, pub block_type: BlockType, pub size: SizeType, @@ -51,7 +50,6 @@ pub struct ConstructionBenchmarkDescription { } pub struct ConstructionBenchmark { - profile: Profile, database: BenchDb, transactions: Transactions, } @@ -60,11 +58,6 @@ impl core::BenchmarkDescription for ConstructionBenchmarkDescription { fn path(&self) -> Path { let mut path = Path::new(&["node", "proposer"]); - match self.profile { - Profile::Wasm => path.push("wasm"), - Profile::Native => path.push("native"), - } - match self.key_types { KeyTypes::Sr25519 => path.push("sr25519"), KeyTypes::Ed25519 => path.push("ed25519"), @@ -99,7 +92,6 @@ impl core::BenchmarkDescription for ConstructionBenchmarkDescription { } Box::new(ConstructionBenchmark { - profile: self.profile, database: bench_db, transactions: Transactions(extrinsics), }) @@ -107,8 +99,8 @@ impl core::BenchmarkDescription for 
ConstructionBenchmarkDescription { fn name(&self) -> Cow<'static, str> { format!( - "Block construction ({:?}/{}, {:?}, {:?} backend)", - self.block_type, self.size, self.profile, self.database_type, + "Block construction ({:?}/{}, {:?} backend)", + self.block_type, self.size, self.database_type, ) .into() } @@ -116,7 +108,7 @@ impl core::BenchmarkDescription for ConstructionBenchmarkDescription { impl core::Benchmark for ConstructionBenchmark { fn run(&mut self, mode: Mode) -> std::time::Duration { - let context = self.database.create_context(self.profile); + let context = self.database.create_context(); let _ = context .client diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index 167377ea9a220..78b280076e0bd 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -33,7 +33,7 @@ use std::borrow::Cow; use node_primitives::Block; -use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile}; +use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes}; use sc_client_api::backend::Backend; use sp_state_machine::InspectState; @@ -43,7 +43,6 @@ use crate::{ }; pub struct ImportBenchmarkDescription { - pub profile: Profile, pub key_types: KeyTypes, pub block_type: BlockType, pub size: SizeType, @@ -51,7 +50,6 @@ pub struct ImportBenchmarkDescription { } pub struct ImportBenchmark { - profile: Profile, database: BenchDb, block: Block, block_type: BlockType, @@ -61,11 +59,6 @@ impl core::BenchmarkDescription for ImportBenchmarkDescription { fn path(&self) -> Path { let mut path = Path::new(&["node", "import"]); - match self.profile { - Profile::Wasm => path.push("wasm"), - Profile::Native => path.push("native"), - } - match self.key_types { KeyTypes::Sr25519 => path.push("sr25519"), KeyTypes::Ed25519 => path.push("ed25519"), @@ -88,21 +81,15 @@ impl core::BenchmarkDescription for ImportBenchmarkDescription { } fn setup(self: Box) -> Box { - let profile = self.profile; let mut bench_db = 
BenchDb::with_key_types(self.database_type, 50_000, self.key_types); let block = bench_db.generate_block(self.block_type.to_content(self.size.transactions())); - Box::new(ImportBenchmark { - database: bench_db, - block_type: self.block_type, - block, - profile, - }) + Box::new(ImportBenchmark { database: bench_db, block_type: self.block_type, block }) } fn name(&self) -> Cow<'static, str> { format!( - "Block import ({:?}/{}, {:?}, {:?} backend)", - self.block_type, self.size, self.profile, self.database_type, + "Block import ({:?}/{}, {:?} backend)", + self.block_type, self.size, self.database_type, ) .into() } @@ -110,7 +97,7 @@ impl core::BenchmarkDescription for ImportBenchmarkDescription { impl core::Benchmark for ImportBenchmark { fn run(&mut self, mode: Mode) -> std::time::Duration { - let mut context = self.database.create_context(self.profile); + let mut context = self.database.create_context(); let _ = context .client diff --git a/bin/node/bench/src/main.rs b/bin/node/bench/src/main.rs index 051d8ddb9bf55..1f69c97695801 100644 --- a/bin/node/bench/src/main.rs +++ b/bin/node/bench/src/main.rs @@ -30,7 +30,7 @@ mod txpool; use clap::Parser; -use node_testing::bench::{BlockType, DatabaseType as BenchDataBaseType, KeyTypes, Profile}; +use node_testing::bench::{BlockType, DatabaseType as BenchDataBaseType, KeyTypes}; use crate::{ common::SizeType, @@ -85,31 +85,28 @@ fn main() { let mut import_benchmarks = Vec::new(); - for profile in [Profile::Wasm, Profile::Native] { - for size in [ - SizeType::Empty, - SizeType::Small, - SizeType::Medium, - SizeType::Large, - SizeType::Full, - SizeType::Custom(opt.transactions.unwrap_or(0)), + for size in [ + SizeType::Empty, + SizeType::Small, + SizeType::Medium, + SizeType::Large, + SizeType::Full, + SizeType::Custom(opt.transactions.unwrap_or(0)), + ] { + for block_type in [ + BlockType::RandomTransfersKeepAlive, + BlockType::RandomTransfersReaping, + BlockType::Noop, ] { - for block_type in [ - 
BlockType::RandomTransfersKeepAlive, - BlockType::RandomTransfersReaping, - BlockType::Noop, - ] { - for database_type in [BenchDataBaseType::RocksDb, BenchDataBaseType::ParityDb] { - import_benchmarks.push((profile, size, block_type, database_type)); - } + for database_type in [BenchDataBaseType::RocksDb, BenchDataBaseType::ParityDb] { + import_benchmarks.push((size, block_type, database_type)); } } } let benchmarks = matrix!( - (profile, size, block_type, database_type) in import_benchmarks.into_iter() => + (size, block_type, database_type) in import_benchmarks.into_iter() => ImportBenchmarkDescription { - profile, key_types: KeyTypes::Sr25519, size, block_type, @@ -138,14 +135,12 @@ fn main() { .iter().map(move |db_type| (size, db_type))) => TrieWriteBenchmarkDescription { database_size: *size, database_type: *db_type }, ConstructionBenchmarkDescription { - profile: Profile::Wasm, key_types: KeyTypes::Sr25519, block_type: BlockType::RandomTransfersKeepAlive, size: SizeType::Medium, database_type: BenchDataBaseType::RocksDb, }, ConstructionBenchmarkDescription { - profile: Profile::Wasm, key_types: KeyTypes::Sr25519, block_type: BlockType::RandomTransfersKeepAlive, size: SizeType::Large, diff --git a/bin/node/bench/src/tempdb.rs b/bin/node/bench/src/tempdb.rs index 2aafd013a586a..f3fd693d21fe1 100644 --- a/bin/node/bench/src/tempdb.rs +++ b/bin/node/bench/src/tempdb.rs @@ -90,7 +90,7 @@ impl TempDatabase { }, DatabaseType::ParityDb => Arc::new(ParityDbWrapper({ let mut options = parity_db::Options::with_columns(self.0.path(), 1); - let mut column_options = &mut options.columns[0]; + let column_options = &mut options.columns[0]; column_options.ref_counted = true; column_options.preimage = true; column_options.uniform = true; diff --git a/bin/node/bench/src/txpool.rs b/bin/node/bench/src/txpool.rs index 4e8e5c0d9a4fd..a3524ac5bc890 100644 --- a/bin/node/bench/src/txpool.rs +++ b/bin/node/bench/src/txpool.rs @@ -23,7 +23,7 @@ use std::borrow::Cow; -use 
node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile}; +use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes}; use sc_transaction_pool::BasicPool; use sc_transaction_pool_api::{TransactionPool, TransactionSource}; @@ -57,7 +57,7 @@ impl core::BenchmarkDescription for PoolBenchmarkDescription { impl core::Benchmark for PoolBenchmark { fn run(&mut self, mode: Mode) -> std::time::Duration { - let context = self.database.create_context(Profile::Wasm); + let context = self.database.create_context(); let _ = context .client diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index ca9b25b6e75f3..37f03fbb0dd7e 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -6,7 +6,7 @@ description = "Generic Substrate node implementation in Rust." build = "build.rs" edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -default-run = "substrate" +default-run = "substrate-node" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" publish = false @@ -25,8 +25,10 @@ maintenance = { status = "actively-developed" } is-it-maintained-issue-resolution = { repository = "paritytech/substrate" } is-it-maintained-open-issues = { repository = "paritytech/substrate" } +# The same node binary as the `substrate` (defined in the workspace `Cargo.toml`) binary, +# but just exposed by this crate here. 
[[bin]] -name = "substrate" +name = "substrate-node" path = "bin/main.rs" required-features = ["cli"] @@ -35,10 +37,10 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies -array-bytes = "4.1" +array-bytes = "6.1" clap = { version = "4.2.5", features = ["derive"], optional = true } -codec = { package = "parity-scale-codec", version = "3.2.2" } -serde = { version = "1.0.136", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1" } +serde = { version = "1.0.163", features = ["derive"] } jsonrpsee = { version = "0.16.2", features = ["server"] } futures = "0.3.21" log = "0.4.17" @@ -49,16 +51,16 @@ sp-authority-discovery = { version = "4.0.0-dev", path = "../../../primitives/au sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } grandpa-primitives = { version = "4.0.0-dev", package = "sp-consensus-grandpa", path = "../../../primitives/consensus/grandpa" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } -sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } +sp-keyring = { version = "24.0.0", path = "../../../primitives/keyring" } +sp-keystore = { version = "0.27.0", path = "../../../primitives/keystore" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -sp-transaction-pool = { version = "4.0.0-dev", path = "../../../primitives/transaction-pool" } sp-transaction-storage-proof = { 
version = "4.0.0-dev", path = "../../../primitives/transaction-storage-proof" } sp-io = { path = "../../../primitives/io" } +sp-statement-store = { path = "../../../primitives/statement-store" } # client dependencies sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } @@ -83,13 +85,14 @@ sc-authority-discovery = { version = "0.10.0-dev", path = "../../../client/autho sc-sync-state-rpc = { version = "0.10.0-dev", path = "../../../client/sync-state-rpc" } sc-sysinfo = { version = "6.0.0-dev", path = "../../../client/sysinfo" } sc-storage-monitor = { version = "0.1.0", path = "../../../client/storage-monitor" } +sc-offchain = { version = "4.0.0-dev", path = "../../../client/offchain" } # frame dependencies frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } frame-system-rpc-runtime-api = { version = "4.0.0-dev", path = "../../../frame/system/rpc/runtime-api" } -pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" } pallet-assets = { version = "4.0.0-dev", path = "../../../frame/assets/" } -pallet-asset-tx-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/asset-tx-payment/" } +pallet-asset-conversion-tx-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } +pallet-asset-tx-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/asset-tx-payment" } pallet-im-online = { version = "4.0.0-dev", default-features = false, path = "../../../frame/im-online" } # node-specific dependencies @@ -113,7 +116,7 @@ sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/ sc-consensus-epochs = { version = "0.10.0-dev", path = "../../../client/consensus/epochs" } sc-service-test = { version = "2.0.0", path = "../../../client/service/test" } sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" } -sp-tracing = { version = "6.0.0", path = 
"../../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } futures = "0.3.21" tempfile = "3.1.0" @@ -121,7 +124,7 @@ assert_cmd = "2.0.2" nix = { version = "0.26.1", features = ["signal"] } serde_json = "1.0" regex = "1.6.0" -platforms = "2.0" +platforms = "3.0" soketto = "0.7.1" criterion = { version = "0.4.0", features = ["async_tokio"] } tokio = { version = "1.22.0", features = ["macros", "time", "parking_lot"] } @@ -158,11 +161,32 @@ cli = [ ] runtime-benchmarks = [ "kitchensink-runtime/runtime-benchmarks", - "frame-benchmarking-cli/runtime-benchmarks" + "frame-benchmarking-cli/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-asset-tx-payment/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-im-online/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sc-client-db/runtime-benchmarks", + "sc-service/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" ] # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. 
-try-runtime = ["kitchensink-runtime/try-runtime", "try-runtime-cli/try-runtime"] +try-runtime = [ + "kitchensink-runtime/try-runtime", + "try-runtime-cli/try-runtime", + "frame-system/try-runtime", + "pallet-asset-conversion-tx-payment/try-runtime", + "pallet-asset-tx-payment/try-runtime", + "pallet-assets/try-runtime", + "pallet-balances/try-runtime", + "pallet-im-online/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime", + "substrate-cli-test-utils/try-runtime" +] [[bench]] name = "transaction_pool" diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index 527b145c62c46..b877aa7350228 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -21,7 +21,6 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughpu use kitchensink_runtime::{constants::currency::*, BalancesCall}; use node_cli::service::{create_extrinsic, FullClient}; use sc_block_builder::{BlockBuilderProvider, BuiltBlock, RecordProof}; -use sc_client_api::execution_extensions::ExecutionStrategies; use sc_consensus::{ block_import::{BlockImportParams, ForkChoiceStrategy}, BlockImport, StateAction, @@ -56,9 +55,6 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { let spec = Box::new(node_cli::chain_spec::development_config()); - // NOTE: We enforce the use of the WASM runtime to benchmark block production using WASM. 
- let execution_strategy = sc_client_api::ExecutionStrategy::AlwaysWasm; - let config = Configuration { impl_name: "BenchmarkImpl".into(), impl_version: "1.0".into(), @@ -77,13 +73,6 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { wasm_method: WasmExecutionMethod::Compiled { instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, }, - execution_strategies: ExecutionStrategies { - syncing: execution_strategy, - importing: execution_strategy, - block_construction: execution_strategy, - offchain_worker: execution_strategy, - other: execution_strategy, - }, rpc_addr: None, rpc_max_connections: Default::default(), rpc_cors: None, @@ -123,13 +112,7 @@ fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic { .into() } -fn import_block( - mut client: &FullClient, - built: BuiltBlock< - node_primitives::Block, - >::StateBackend, - >, -) { +fn import_block(mut client: &FullClient, built: BuiltBlock) { let mut params = BlockImportParams::new(BlockOrigin::File, built.block.header); params.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(built.storage_changes)); diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index 44ebe1e7d4fe6..d3e8c02a958f7 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -23,7 +23,6 @@ use futures::{future, StreamExt}; use kitchensink_runtime::{constants::currency::*, BalancesCall, SudoCall}; use node_cli::service::{create_extrinsic, fetch_nonce, FullClient, TransactionPool}; use node_primitives::AccountId; -use sc_client_api::execution_extensions::ExecutionStrategies; use sc_service::{ config::{ BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, @@ -70,14 +69,6 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { blocks_pruning: BlocksPruning::KeepAll, chain_spec: spec, wasm_method: Default::default(), - // NOTE: we enforce 
the use of the native runtime to make the errors more debuggable - execution_strategies: ExecutionStrategies { - syncing: sc_client_api::ExecutionStrategy::NativeWhenPossible, - importing: sc_client_api::ExecutionStrategy::NativeWhenPossible, - block_construction: sc_client_api::ExecutionStrategy::NativeWhenPossible, - offchain_worker: sc_client_api::ExecutionStrategy::NativeWhenPossible, - other: sc_client_api::ExecutionStrategy::NativeWhenPossible, - }, rpc_addr: None, rpc_max_connections: Default::default(), rpc_cors: None, diff --git a/bin/node/cli/build.rs b/bin/node/cli/build.rs index 18860a1afafb7..033f1e3349e6f 100644 --- a/bin/node/cli/build.rs +++ b/bin/node/cli/build.rs @@ -37,14 +37,14 @@ mod cli { rerun_if_git_head_changed(); } - /// Build shell completion scripts for all known shells + /// Build shell completion scripts for all known shells. fn build_shell_completion() { for shell in Shell::value_variants() { build_completion(shell); } } - /// Build the shell auto-completion for a given Shell + /// Build the shell auto-completion for a given Shell. fn build_completion(shell: &Shell) { let outdir = match env::var_os("OUT_DIR") { None => return, diff --git a/bin/node/cli/doc/shell-completion.adoc b/bin/node/cli/doc/shell-completion.adoc index 8afbd37adb9f4..168f00994fb2d 100644 --- a/bin/node/cli/doc/shell-completion.adoc +++ b/bin/node/cli/doc/shell-completion.adoc @@ -1,7 +1,7 @@ == Shell completion -The Substrate cli command supports shell auto-completion. For this to work, you will need to run the completion script matching you build and system. +The Substrate cli command supports shell auto-completion. For this to work, you will need to run the completion script matching your build and system. 
Assuming you built a release version using `cargo build --release` and use `bash` run the following: @@ -15,7 +15,7 @@ You can find completion scripts for: - elvish - powershell -To make this change persistent, you can proceed as follow: +To make this change persistent, you can proceed as follows: .First install @@ -30,7 +30,7 @@ source $HOME/.bash_profile .Update -When you build a new version of Substrate, the following will ensure you auto-completion script matches the current binary: +When you build a new version of Substrate, the following will ensure your auto-completion script matches the current binary: [source, shell] ---- diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 85a08e71cc5a9..57d367e7c49a8 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -20,11 +20,10 @@ use grandpa_primitives::AuthorityId as GrandpaId; use kitchensink_runtime::{ - constants::currency::*, wasm_binary_unwrap, AuthorityDiscoveryConfig, BabeConfig, - BalancesConfig, Block, CouncilConfig, DemocracyConfig, ElectionsConfig, GrandpaConfig, - ImOnlineConfig, IndicesConfig, MaxNominations, NominationPoolsConfig, SessionConfig, - SessionKeys, SocietyConfig, StakerStatus, StakingConfig, SudoConfig, SystemConfig, - TechnicalCommitteeConfig, + constants::currency::*, wasm_binary_unwrap, BabeConfig, BalancesConfig, Block, CouncilConfig, + DemocracyConfig, ElectionsConfig, ImOnlineConfig, IndicesConfig, MaxNominations, + NominationPoolsConfig, SessionConfig, SessionKeys, SocietyConfig, StakerStatus, StakingConfig, + SudoConfig, SystemConfig, TechnicalCommitteeConfig, }; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use sc_chain_spec::ChainSpecExtension; @@ -39,7 +38,7 @@ use sp_runtime::{ Perbill, }; -pub use kitchensink_runtime::GenesisConfig; +pub use kitchensink_runtime::RuntimeGenesisConfig; pub use node_primitives::{AccountId, Balance, Signature}; type AccountPublic = ::Signer; @@ -62,7 +61,7 @@ pub struct 
Extensions { } /// Specialized `ChainSpec`. -pub type ChainSpec = sc_service::GenericChainSpec; +pub type ChainSpec = sc_service::GenericChainSpec; /// Flaming Fir testnet generator pub fn flaming_fir_config() -> Result { ChainSpec::from_json_bytes(&include_bytes!("../res/flaming-fir.json")[..]) @@ -77,7 +76,7 @@ fn session_keys( SessionKeys { grandpa, babe, im_online, authority_discovery } } -fn staging_testnet_config_genesis() -> GenesisConfig { +fn staging_testnet_config_genesis() -> RuntimeGenesisConfig { #[rustfmt::skip] // stash, controller, session-key // generated with secret: @@ -200,14 +199,14 @@ pub fn staging_testnet_config() -> ChainSpec { ) } -/// Helper function to generate a crypto pair from seed +/// Helper function to generate a crypto pair from seed. pub fn get_from_seed(seed: &str) -> ::Public { TPublic::Pair::from_string(&format!("//{}", seed), None) .expect("static values are valid; qed") .public() } -/// Helper function to generate an account ID from seed +/// Helper function to generate an account ID from seed. pub fn get_account_id_from_seed(seed: &str) -> AccountId where AccountPublic: From<::Public>, @@ -215,7 +214,7 @@ where AccountPublic::from(get_from_seed::(seed)).into_account() } -/// Helper function to generate stash, controller and session key from seed +/// Helper function to generate stash, controller and session key from seed. pub fn authority_keys_from_seed( seed: &str, ) -> (AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId) { @@ -229,7 +228,7 @@ pub fn authority_keys_from_seed( ) } -/// Helper function to create GenesisConfig for testing +/// Helper function to create RuntimeGenesisConfig for testing. 
pub fn testnet_genesis( initial_authorities: Vec<( AccountId, @@ -242,7 +241,7 @@ pub fn testnet_genesis( initial_nominators: Vec, root_key: AccountId, endowed_accounts: Option>, -) -> GenesisConfig { +) -> RuntimeGenesisConfig { let mut endowed_accounts: Vec = endowed_accounts.unwrap_or_else(|| { vec![ get_account_id_from_seed::("Alice"), @@ -294,8 +293,8 @@ pub fn testnet_genesis( const ENDOWMENT: Balance = 10_000_000 * DOLLARS; const STASH: Balance = ENDOWMENT / 1000; - GenesisConfig { - system: SystemConfig { code: wasm_binary_unwrap().to_vec() }, + RuntimeGenesisConfig { + system: SystemConfig { code: wasm_binary_unwrap().to_vec(), ..Default::default() }, balances: BalancesConfig { balances: endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT)).collect(), }, @@ -340,29 +339,22 @@ pub fn testnet_genesis( }, sudo: SudoConfig { key: Some(root_key) }, babe: BabeConfig { - authorities: vec![], epoch_config: Some(kitchensink_runtime::BABE_GENESIS_EPOCH_CONFIG), + ..Default::default() }, im_online: ImOnlineConfig { keys: vec![] }, - authority_discovery: AuthorityDiscoveryConfig { keys: vec![] }, - grandpa: GrandpaConfig { authorities: vec![] }, + authority_discovery: Default::default(), + grandpa: Default::default(), technical_membership: Default::default(), treasury: Default::default(), - society: SocietyConfig { - members: endowed_accounts - .iter() - .take((num_endowed_accounts + 1) / 2) - .cloned() - .collect(), - pot: 0, - max_members: 999, - }, + society: SocietyConfig { pot: 0 }, vesting: Default::default(), assets: pallet_assets::GenesisConfig { // This asset is used by the NIS pallet as counterpart currency. 
assets: vec![(9, get_account_id_from_seed::("Alice"), true, 1)], ..Default::default() }, + pool_assets: Default::default(), transaction_storage: Default::default(), transaction_payment: Default::default(), alliance: Default::default(), @@ -372,10 +364,11 @@ pub fn testnet_genesis( min_join_bond: 1 * DOLLARS, ..Default::default() }, + glutton: Default::default(), } } -fn development_config_genesis() -> GenesisConfig { +fn development_config_genesis() -> RuntimeGenesisConfig { testnet_genesis( vec![authority_keys_from_seed("Alice")], vec![], @@ -384,7 +377,7 @@ fn development_config_genesis() -> GenesisConfig { ) } -/// Development config (single validator Alice) +/// Development config (single validator Alice). pub fn development_config() -> ChainSpec { ChainSpec::from_genesis( "Development", @@ -400,7 +393,7 @@ pub fn development_config() -> ChainSpec { ) } -fn local_testnet_genesis() -> GenesisConfig { +fn local_testnet_genesis() -> RuntimeGenesisConfig { testnet_genesis( vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], vec![], @@ -409,7 +402,7 @@ fn local_testnet_genesis() -> GenesisConfig { ) } -/// Local testnet config (multivalidator Alice + Bob) +/// Local testnet config (multivalidator Alice + Bob). pub fn local_testnet_config() -> ChainSpec { ChainSpec::from_genesis( "Local Testnet", @@ -432,7 +425,7 @@ pub(crate) mod tests { use sc_service_test; use sp_runtime::BuildStorage; - fn local_testnet_genesis_instant_single() -> GenesisConfig { + fn local_testnet_genesis_instant_single() -> RuntimeGenesisConfig { testnet_genesis( vec![authority_keys_from_seed("Alice")], vec![], @@ -441,7 +434,7 @@ pub(crate) mod tests { ) } - /// Local testnet config (single validator - Alice) + /// Local testnet config (single validator - Alice). 
pub fn integration_test_config_with_single_authority() -> ChainSpec { ChainSpec::from_genesis( "Integration Test", @@ -457,7 +450,7 @@ pub(crate) mod tests { ) } - /// Local testnet config (multivalidator Alice + Bob) + /// Local testnet config (multivalidator Alice + Bob). pub fn integration_test_config_with_two_authorities() -> ChainSpec { ChainSpec::from_genesis( "Integration Test", diff --git a/bin/node/cli/src/cli.rs b/bin/node/cli/src/cli.rs index 35b949831141d..4e0d6303870cb 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -57,12 +57,9 @@ pub enum Subcommand { #[command(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), - /// Try some command against runtime state. - #[cfg(feature = "try-runtime")] - TryRuntime(try_runtime_cli::TryRuntimeCmd), - - /// Try some command against runtime state. Note: `try-runtime` feature must be enabled. - #[cfg(not(feature = "try-runtime"))] + /// Try-runtime has migrated to a standalone CLI + /// (). The subcommand exists as a stub and + /// deprecation notice. It will be removed entirely some time after Janurary 2024. 
TryRuntime, /// Key management cli utilities diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index b38b25d8fb3ad..8fb413dba1778 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -26,18 +26,12 @@ use frame_benchmarking_cli::*; use kitchensink_runtime::{ExistentialDeposit, RuntimeApi}; use node_executor::ExecutorDispatch; use node_primitives::Block; -use sc_cli::{ChainSpec, Result, RuntimeVersion, SubstrateCli}; +use sc_cli::{Result, SubstrateCli}; use sc_service::PartialComponents; use sp_keyring::Sr25519Keyring; use std::sync::Arc; -#[cfg(feature = "try-runtime")] -use { - kitchensink_runtime::constants::time::SLOT_DURATION, - try_runtime_cli::block_building_info::substrate_info, -}; - impl SubstrateCli for Cli { fn impl_name() -> String { "Substrate Node".into() @@ -79,10 +73,6 @@ impl SubstrateCli for Cli { }; Ok(spec) } - - fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { - &kitchensink_runtime::VERSION - } } /// Parse command line arguments into service configuration. @@ -117,7 +107,7 @@ pub fn run() -> Result<()> { ) } - cmd.run::(config) + cmd.run::(config) }, BenchmarkCmd::Block(cmd) => { // ensure that we keep the task manager alive @@ -231,28 +221,7 @@ pub fn run() -> Result<()> { }) }, #[cfg(feature = "try-runtime")] - Some(Subcommand::TryRuntime(cmd)) => { - use sc_executor::{sp_wasm_interface::ExtendedHostFunctions, NativeExecutionDispatch}; - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - // we don't need any of the components of new_partial, just a runtime, or a task - // manager to do `async_run`. 
- let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - let task_manager = - sc_service::TaskManager::new(config.tokio_handle.clone(), registry) - .map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?; - - let info_provider = substrate_info(SLOT_DURATION); - - Ok(( - cmd.run::::ExtendHostFunctions, - >, _>(Some(info_provider)), - task_manager, - )) - }) - }, + Some(Subcommand::TryRuntime) => Err(try_runtime_cli::DEPRECATION_NOTICE.into()), #[cfg(not(feature = "try-runtime"))] Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \ You can enable it with `--features try-runtime`." diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 8fc44c7c5eddf..ecca5c60db515 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -28,7 +28,7 @@ use futures::prelude::*; use kitchensink_runtime::RuntimeApi; use node_executor::ExecutorDispatch; use node_primitives::Block; -use sc_client_api::BlockBackend; +use sc_client_api::{Backend, BlockBackend}; use sc_consensus_babe::{self, SlotProportion}; use sc_executor::NativeElseWasmExecutor; use sc_network::{event::Event, NetworkEventStream, NetworkService}; @@ -37,6 +37,7 @@ use sc_network_sync::SyncingService; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; use sc_statement_store::Store as StatementStore; use sc_telemetry::{Telemetry, TelemetryWorker}; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::ProvideRuntimeApi; use sp_core::crypto::Pair; use sp_runtime::{generic, traits::Block as BlockT, SaturatedConversion}; @@ -50,9 +51,13 @@ type FullSelectChain = sc_consensus::LongestChain; type FullGrandpaBlockImport = grandpa::GrandpaBlockImport; -/// The transaction pool type defintion. +/// The transaction pool type definition. 
pub type TransactionPool = sc_transaction_pool::FullPool; +/// The minimum period of blocks on which justifications will be +/// imported and generated. +const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512; + /// Fetch the nonce of the given `account` from the chain state. /// /// Note: Should only be used for tests. @@ -98,7 +103,7 @@ pub fn create_extrinsic( )), frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), - pallet_asset_tx_payment::ChargeAssetTxPayment::::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from( tip, None, ), ); @@ -135,7 +140,7 @@ pub fn new_partial( FullClient, FullBackend, FullSelectChain, - sc_consensus::DefaultImportQueue, + sc_consensus::DefaultImportQueue, sc_transaction_pool::FullPool, ( impl Fn( @@ -192,6 +197,7 @@ pub fn new_partial( let (grandpa_block_import, grandpa_link) = grandpa::block_import( client.clone(), + GRANDPA_JUSTIFICATION_PERIOD, &(client.clone() as Arc<_>), select_chain.clone(), telemetry.as_ref().map(|x| x.handle()), @@ -205,27 +211,29 @@ pub fn new_partial( )?; let slot_duration = babe_link.config().slot_duration(); - let (import_queue, babe_worker_handle) = sc_consensus_babe::import_queue( - babe_link.clone(), - block_import.clone(), - Some(Box::new(justification_import)), - client.clone(), - select_chain.clone(), - move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + let (import_queue, babe_worker_handle) = + sc_consensus_babe::import_queue(sc_consensus_babe::ImportQueueParams { + link: babe_link.clone(), + block_import: block_import.clone(), + justification_import: Some(Box::new(justification_import)), + client: client.clone(), + select_chain: select_chain.clone(), + create_inherent_data_providers: move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - let slot = + let slot = sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( *timestamp, 
slot_duration, ); - Ok((slot, timestamp)) - }, - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - telemetry.as_ref().map(|x| x.handle()), - )?; + Ok((slot, timestamp)) + }, + spawner: &task_manager.spawn_essential_handle(), + registry: config.prometheus_registry(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), + })?; let import_setup = (block_import, grandpa_link, babe_link); @@ -233,6 +241,7 @@ pub fn new_partial( &config.data_path, Default::default(), client.clone(), + keystore_container.local_keystore(), config.prometheus_registry(), &task_manager.spawn_handle(), ) @@ -278,9 +287,10 @@ pub fn new_partial( finality_provider: finality_proof_provider.clone(), }, statement_store: rpc_statement_store.clone(), + backend: rpc_backend.clone(), }; - node_rpc::create_full(deps, rpc_backend.clone()).map_err(Into::into) + node_rpc::create_full(deps).map_err(Into::into) }; (rpc_extensions_builder, shared_voter_state2) @@ -381,15 +391,6 @@ pub fn new_full_base( warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), })?; - if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); - } - let role = config.role.clone(); let force_authoring = config.force_authoring; let backoff_authoring_blocks = @@ -397,10 +398,11 @@ pub fn new_full_base( let name = config.network.node_name.clone(); let enable_grandpa = !config.disable_grandpa; let prometheus_registry = config.prometheus_registry().cloned(); + let enable_offchain_worker = config.offchain_worker.enabled; let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { config, - backend, + backend: backend.clone(), client: client.clone(), keystore: keystore_container.keystore(), network: network.clone(), @@ -525,14 +527,14 @@ pub fn new_full_base( // need a keystore, regardless of which 
protocol we use below. let keystore = if role.is_authority() { Some(keystore_container.keystore()) } else { None }; - let config = grandpa::Config { + let grandpa_config = grandpa::Config { // FIXME #1578 make this available through chainspec gossip_duration: std::time::Duration::from_millis(333), - justification_period: 512, + justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD, name: Some(name), observer_enabled: false, keystore, - local_role: role, + local_role: role.clone(), telemetry: telemetry.as_ref().map(|x| x.handle()), protocol_name: grandpa_protocol_name, }; @@ -545,7 +547,7 @@ pub fn new_full_base( // been tested extensively yet and having most nodes in a network run it // could lead to finality stalls. let grandpa_config = grandpa::GrandpaParams { - config, + config: grandpa_config, link: grandpa_link, network: network.clone(), sync: Arc::new(sync_service.clone()), @@ -553,6 +555,7 @@ pub fn new_full_base( voting_rule: grandpa::VotingRulesBuilder::default().build(), prometheus_registry: prometheus_registry.clone(), shared_voter_state, + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), }; // the GRANDPA voter task is considered infallible, i.e. 
@@ -584,6 +587,29 @@ pub fn new_full_base( statement_handler.run(), ); + if enable_offchain_worker { + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-work", + sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { + runtime_api_provider: client.clone(), + keystore: Some(keystore_container.keystore()), + offchain_db: backend.offchain_storage(), + transaction_pool: Some(OffchainTransactionPoolFactory::new( + transaction_pool.clone(), + )), + network_provider: network.clone(), + is_validator: role.is_authority(), + enable_http_requests: true, + custom_extensions: move |_| { + vec![Box::new(statement_store.clone().as_statement_store_ext()) as Box<_>] + }, + }) + .run(client.clone(), task_manager.spawn_handle()) + .boxed(), + ); + } + network_starter.start_network(); Ok(NewFullBase { task_manager, @@ -815,7 +841,8 @@ mod tests { let check_era = frame_system::CheckEra::from(Era::Immortal); let check_nonce = frame_system::CheckNonce::from(index); let check_weight = frame_system::CheckWeight::new(); - let tx_payment = pallet_asset_tx_payment::ChargeAssetTxPayment::from(0, None); + let tx_payment = + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None); let extra = ( check_non_zero_sender, check_spec_version, diff --git a/bin/node/cli/tests/benchmark_block_works.rs b/bin/node/cli/tests/benchmark_block_works.rs index 50103a66a4d40..11a1c57a713f0 100644 --- a/bin/node/cli/tests/benchmark_block_works.rs +++ b/bin/node/cli/tests/benchmark_block_works.rs @@ -33,13 +33,13 @@ async fn benchmark_block_works() { common::run_node_for_a_while(base_dir.path(), &["--dev", "--no-hardware-benchmarks"]).await; // Invoke `benchmark block` with all options to make sure that they are valid. 
- let status = Command::new(cargo_bin("substrate")) + let status = Command::new(cargo_bin("substrate-node")) .args(["benchmark", "block", "--dev"]) .arg("-d") .arg(base_dir.path()) .args(["--from", "1", "--to", "1"]) .args(["--repeat", "1"]) - .args(["--execution", "wasm", "--wasm-execution", "compiled"]) + .args(["--wasm-execution=compiled"]) .status() .unwrap(); diff --git a/bin/node/cli/tests/benchmark_extrinsic_works.rs b/bin/node/cli/tests/benchmark_extrinsic_works.rs index 9cdb971def522..f7addd883b41f 100644 --- a/bin/node/cli/tests/benchmark_extrinsic_works.rs +++ b/bin/node/cli/tests/benchmark_extrinsic_works.rs @@ -32,13 +32,14 @@ fn benchmark_extrinsic_works() { fn benchmark_extrinsic(pallet: &str, extrinsic: &str) { let base_dir = tempdir().expect("could not create a temp dir"); - let status = Command::new(cargo_bin("substrate")) + let status = Command::new(cargo_bin("substrate-node")) .args(&["benchmark", "extrinsic", "--dev"]) .arg("-d") .arg(base_dir.path()) .args(&["--pallet", pallet, "--extrinsic", extrinsic]) // Run with low repeats for faster execution. .args(["--warmup=10", "--repeat=10", "--max-ext-per-block=10"]) + .args(["--wasm-execution=compiled"]) .status() .unwrap(); diff --git a/bin/node/cli/tests/benchmark_machine_works.rs b/bin/node/cli/tests/benchmark_machine_works.rs index 2cdadb64603ec..b3e3f9c78dea5 100644 --- a/bin/node/cli/tests/benchmark_machine_works.rs +++ b/bin/node/cli/tests/benchmark_machine_works.rs @@ -22,7 +22,7 @@ use std::process::Command; /// Tests that the `benchmark machine` command works for the substrate dev runtime. 
#[test] fn benchmark_machine_works() { - let status = Command::new(cargo_bin("substrate")) + let status = Command::new(cargo_bin("substrate-node")) .args(["benchmark", "machine", "--dev"]) .args([ "--verify-duration", @@ -48,7 +48,7 @@ fn benchmark_machine_works() { #[test] #[cfg(debug_assertions)] fn benchmark_machine_fails_with_slow_hardware() { - let output = Command::new(cargo_bin("substrate")) + let output = Command::new(cargo_bin("substrate-node")) .args(["benchmark", "machine", "--dev"]) .args([ "--verify-duration", diff --git a/bin/node/cli/tests/benchmark_overhead_works.rs b/bin/node/cli/tests/benchmark_overhead_works.rs index 92ab93b7f6f26..b246167f2c447 100644 --- a/bin/node/cli/tests/benchmark_overhead_works.rs +++ b/bin/node/cli/tests/benchmark_overhead_works.rs @@ -28,7 +28,7 @@ fn benchmark_overhead_works() { // Only put 10 extrinsics into the block otherwise it takes forever to build it // especially for a non-release build. - let status = Command::new(cargo_bin("substrate")) + let status = Command::new(cargo_bin("substrate-node")) .args(&["benchmark", "overhead", "--dev", "-d"]) .arg(base_path) .arg("--weight-path") @@ -36,6 +36,7 @@ fn benchmark_overhead_works() { .args(["--warmup", "10", "--repeat", "10"]) .args(["--add", "100", "--mul", "1.2", "--metric", "p75"]) .args(["--max-ext-per-block", "10"]) + .args(["--wasm-execution=compiled"]) .status() .unwrap(); assert!(status.success()); diff --git a/bin/node/cli/tests/benchmark_pallet_works.rs b/bin/node/cli/tests/benchmark_pallet_works.rs index 2d9946543eed2..8441333429bea 100644 --- a/bin/node/cli/tests/benchmark_pallet_works.rs +++ b/bin/node/cli/tests/benchmark_pallet_works.rs @@ -34,16 +34,20 @@ fn benchmark_pallet_works() { } fn benchmark_pallet(steps: u32, repeat: u32, should_work: bool) { - let output = Command::new(cargo_bin("substrate")) + let status = Command::new(cargo_bin("substrate-node")) .args(["benchmark", "pallet", "--dev"]) // Use the `addition` benchmark since is the fastest. 
.args(["--pallet", "frame-benchmarking", "--extrinsic", "addition"]) .args(["--steps", &format!("{}", steps), "--repeat", &format!("{}", repeat)]) - .output() + .args([ + "--wasm-execution=compiled", + "--no-storage-info", + "--no-median-slopes", + "--no-min-squares", + "--heap-pages=4096", + ]) + .status() .unwrap(); - if output.status.success() != should_work { - let log = String::from_utf8_lossy(&output.stderr).to_string(); - panic!("Test failed:\n{}", log); - } + assert_eq!(status.success(), should_work); } diff --git a/bin/node/cli/tests/benchmark_storage_works.rs b/bin/node/cli/tests/benchmark_storage_works.rs index 953c07ca7f0db..e4566f4f9b18d 100644 --- a/bin/node/cli/tests/benchmark_storage_works.rs +++ b/bin/node/cli/tests/benchmark_storage_works.rs @@ -40,7 +40,7 @@ fn benchmark_storage_works() { } fn benchmark_storage(db: &str, base_path: &Path) -> ExitStatus { - Command::new(cargo_bin("substrate")) + Command::new(cargo_bin("substrate-node")) .args(&["benchmark", "storage", "--dev"]) .arg("--db") .arg(db) diff --git a/bin/node/cli/tests/build_spec_works.rs b/bin/node/cli/tests/build_spec_works.rs index dc5d36184f0c6..ce97dea6f6c84 100644 --- a/bin/node/cli/tests/build_spec_works.rs +++ b/bin/node/cli/tests/build_spec_works.rs @@ -24,7 +24,7 @@ use tempfile::tempdir; fn build_spec_works() { let base_path = tempdir().expect("could not create a temp dir"); - let output = Command::new(cargo_bin("substrate")) + let output = Command::new(cargo_bin("substrate-node")) .args(&["build-spec", "--dev", "-d"]) .arg(base_path.path()) .output() diff --git a/bin/node/cli/tests/check_block_works.rs b/bin/node/cli/tests/check_block_works.rs index 67bc5e6031ea0..083a79c477bab 100644 --- a/bin/node/cli/tests/check_block_works.rs +++ b/bin/node/cli/tests/check_block_works.rs @@ -30,7 +30,7 @@ async fn check_block_works() { common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await; - let status = Command::new(cargo_bin("substrate")) + let 
status = Command::new(cargo_bin("substrate-node")) .args(&["check-block", "--dev", "-d"]) .arg(base_path.path()) .arg("1") diff --git a/bin/node/cli/tests/export_import_flow.rs b/bin/node/cli/tests/export_import_flow.rs index b5785f99ea81f..0dc001ac43011 100644 --- a/bin/node/cli/tests/export_import_flow.rs +++ b/bin/node/cli/tests/export_import_flow.rs @@ -96,7 +96,7 @@ impl<'a> ExportImportRevertExecutor<'a> { }; // Running the command and capturing the output. - let output = Command::new(cargo_bin("substrate")) + let output = Command::new(cargo_bin("substrate-node")) .args(&arguments) .arg(&base_path) .arg(&self.exported_blocks_file) @@ -160,7 +160,7 @@ impl<'a> ExportImportRevertExecutor<'a> { /// Runs the `revert` command. fn run_revert(&self) { - let output = Command::new(cargo_bin("substrate")) + let output = Command::new(cargo_bin("substrate-node")) .args(&["revert", "--dev", "-d"]) .arg(&self.base_path.path()) .output() diff --git a/bin/node/cli/tests/inspect_works.rs b/bin/node/cli/tests/inspect_works.rs index 3695c318a8df2..10b0e518e9e87 100644 --- a/bin/node/cli/tests/inspect_works.rs +++ b/bin/node/cli/tests/inspect_works.rs @@ -30,7 +30,7 @@ async fn inspect_works() { common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await; - let status = Command::new(cargo_bin("substrate")) + let status = Command::new(cargo_bin("substrate-node")) .args(&["inspect", "--dev", "-d"]) .arg(base_path.path()) .args(&["block", "1"]) diff --git a/bin/node/cli/tests/purge_chain_works.rs b/bin/node/cli/tests/purge_chain_works.rs index 77421f865a0d9..58c4f474521f4 100644 --- a/bin/node/cli/tests/purge_chain_works.rs +++ b/bin/node/cli/tests/purge_chain_works.rs @@ -29,7 +29,7 @@ async fn purge_chain_works() { common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await; - let status = Command::new(cargo_bin("substrate")) + let status = Command::new(cargo_bin("substrate-node")) .args(&["purge-chain", "--dev", 
"-d"]) .arg(base_path.path()) .arg("-y") diff --git a/bin/node/cli/tests/running_the_node_and_interrupt.rs b/bin/node/cli/tests/running_the_node_and_interrupt.rs index 1308067da0256..f10ea6a055b49 100644 --- a/bin/node/cli/tests/running_the_node_and_interrupt.rs +++ b/bin/node/cli/tests/running_the_node_and_interrupt.rs @@ -33,7 +33,7 @@ async fn running_the_node_works_and_can_be_interrupted() { async fn run_command_and_kill(signal: Signal) { let base_path = tempdir().expect("could not create a temp dir"); let mut cmd = common::KillChildOnDrop( - Command::new(cargo_bin("substrate")) + Command::new(cargo_bin("substrate-node")) .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) .args(&["--dev", "-d"]) diff --git a/bin/node/cli/tests/telemetry.rs b/bin/node/cli/tests/telemetry.rs index 176d2e81ad06b..2321f56c473d6 100644 --- a/bin/node/cli/tests/telemetry.rs +++ b/bin/node/cli/tests/telemetry.rs @@ -67,7 +67,7 @@ async fn telemetry_works() { } }); - let mut substrate = process::Command::new(cargo_bin("substrate")); + let mut substrate = process::Command::new(cargo_bin("substrate-node")); let mut substrate = KillChildOnDrop( substrate diff --git a/bin/node/cli/tests/temp_base_path_works.rs b/bin/node/cli/tests/temp_base_path_works.rs index fdcd9e23dde5a..42f493afad256 100644 --- a/bin/node/cli/tests/temp_base_path_works.rs +++ b/bin/node/cli/tests/temp_base_path_works.rs @@ -31,7 +31,7 @@ use substrate_cli_test_utils as common; //#[tokio::test] async fn temp_base_path_works() { common::run_with_timeout(Duration::from_secs(60 * 10), async move { - let mut cmd = Command::new(cargo_bin("substrate")); + let mut cmd = Command::new(cargo_bin("substrate-node")); let mut child = common::KillChildOnDrop( cmd.args(&["--dev", "--tmp", "--no-hardware-benchmarks"]) .stdout(Stdio::piped()) diff --git a/bin/node/cli/tests/version.rs b/bin/node/cli/tests/version.rs index e239277c9b0ed..ac1a6b79682ec 100644 --- a/bin/node/cli/tests/version.rs +++ 
b/bin/node/cli/tests/version.rs @@ -21,17 +21,17 @@ use regex::Regex; use std::process::Command; fn expected_regex() -> Regex { - Regex::new(r"^substrate (.+)-([a-f\d]+)$").unwrap() + Regex::new(r"^substrate-node (.+)-([a-f\d]+)$").unwrap() } #[test] fn version_is_full() { let expected = expected_regex(); - let output = Command::new(cargo_bin("substrate")).args(&["--version"]).output().unwrap(); + let output = Command::new(cargo_bin("substrate-node")).args(&["--version"]).output().unwrap(); assert!(output.status.success(), "command returned with non-success exit code"); - let output = String::from_utf8_lossy(&output.stdout).trim().to_owned(); + let output = dbg!(String::from_utf8_lossy(&output.stdout).trim().to_owned()); let captures = expected.captures(output.as_str()).expect("could not parse version in output"); assert_eq!(&captures[1], env!("CARGO_PKG_VERSION")); @@ -41,11 +41,11 @@ fn version_is_full() { fn test_regex_matches_properly() { let expected = expected_regex(); - let captures = expected.captures("substrate 2.0.0-da487d19d").unwrap(); + let captures = expected.captures("substrate-node 2.0.0-da487d19d").unwrap(); assert_eq!(&captures[1], "2.0.0"); assert_eq!(&captures[2], "da487d19d"); - let captures = expected.captures("substrate 2.0.0-alpha.5-da487d19d").unwrap(); + let captures = expected.captures("substrate-node 2.0.0-alpha.5-da487d19d").unwrap(); assert_eq!(&captures[1], "2.0.0-alpha.5"); assert_eq!(&captures[2], "da487d19d"); } diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index 5f11d513c434b..b24414482952c 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -13,17 +13,17 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } scale-info = { version = "2.5.0", features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", path = 
"../../../frame/benchmarking" } node-primitives = { version = "2.0.0", path = "../primitives" } kitchensink-runtime = { version = "3.0.0-dev", path = "../runtime" } sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } -sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } -sp-state-machine = { version = "0.13.0", path = "../../../primitives/state-machine" } -sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" } -sp-trie = { version = "7.0.0", path = "../../../primitives/trie" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-keystore = { version = "0.27.0", path = "../../../primitives/keystore" } +sp-state-machine = { version = "0.28.0", path = "../../../primitives/state-machine" } +sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } +sp-trie = { version = "22.0.0", path = "../../../primitives/trie" } sp-statement-store = { version = "4.0.0-dev", path = "../../../primitives/statement-store" } [dev-dependencies] @@ -41,12 +41,12 @@ pallet-sudo = { version = "4.0.0-dev", path = "../../../frame/sudo" } pallet-timestamp = { version = "4.0.0-dev", path = "../../../frame/timestamp" } pallet-treasury = { version = "4.0.0-dev", path = "../../../frame/treasury" } pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" } -sp-application-crypto = { version = "7.0.0", path = "../../../primitives/application-crypto" } +sp-application-crypto = { version = "23.0.0", path = "../../../primitives/application-crypto" } pallet-root-testing = { version = "1.0.0-dev", path = "../../../frame/root-testing" } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } -sp-externalities = { version = "0.13.0", path = "../../../primitives/externalities" } -sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } -sp-runtime = { 
version = "7.0.0", path = "../../../primitives/runtime" } +sp-externalities = { version = "0.19.0", path = "../../../primitives/externalities" } +sp-keyring = { version = "24.0.0", path = "../../../primitives/keyring" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } [features] stress-test = [] diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index aa7d9eb0f31ff..1c9c002492cf5 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -19,8 +19,8 @@ use codec::{Decode, Encode}; use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use frame_support::Hashable; use kitchensink_runtime::{ - constants::currency::*, Block, BuildStorage, CheckedExtrinsic, GenesisConfig, Header, - RuntimeCall, UncheckedExtrinsic, + constants::currency::*, Block, BuildStorage, CheckedExtrinsic, Header, RuntimeCall, + RuntimeGenesisConfig, UncheckedExtrinsic, }; use node_executor::ExecutorDispatch; use node_primitives::{BlockNumber, Hash}; @@ -67,7 +67,7 @@ fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH) } -fn new_test_ext(genesis_config: &GenesisConfig) -> TestExternalities { +fn new_test_ext(genesis_config: &RuntimeGenesisConfig) -> TestExternalities { let mut test_ext = TestExternalities::new_with_code( compact_code_unwrap(), genesis_config.build_storage().unwrap(), @@ -157,7 +157,7 @@ fn construct_block( } fn test_blocks( - genesis_config: &GenesisConfig, + genesis_config: &RuntimeGenesisConfig, executor: &NativeElseWasmExecutor, ) -> Vec<(Vec, Hash)> { let mut test_ext = new_test_ext(genesis_config); @@ -182,7 +182,6 @@ fn bench_execute_block(c: &mut Criterion) { let mut group = c.benchmark_group("execute blocks"); let execution_methods = vec![ ExecutionMethod::Native, - ExecutionMethod::Wasm(WasmExecutionMethod::Interpreted), ExecutionMethod::Wasm(WasmExecutionMethod::Compiled { 
instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, }), diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index d301aa06f90b0..a2f46e9fdbe99 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -183,7 +183,7 @@ fn panic_execution_with_foreign_code_gives_error() { let mut t = new_test_ext(bloaty_code_unwrap()); t.insert( >::hashed_key_for(alice()), - AccountInfo::<::Index, _> { + AccountInfo::<::Nonce, _> { providers: 1, data: (69u128, 0u128, 0u128, 1u128 << 127), ..Default::default() @@ -209,7 +209,7 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { let mut t = new_test_ext(compact_code_unwrap()); t.insert( >::hashed_key_for(alice()), - AccountInfo::<::Index, _> { + AccountInfo::<::Nonce, _> { providers: 1, data: (69u128, 0u128, 0u128, 1u128 << 127), ..Default::default() @@ -235,7 +235,7 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { let mut t = new_test_ext(compact_code_unwrap()); t.insert( >::hashed_key_for(alice()), - AccountInfo::<::Index, _> { + AccountInfo::<::Nonce, _> { providers: 1, data: (111 * DOLLARS, 0u128, 0u128, 1u128 << 127), ..Default::default() @@ -245,7 +245,7 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { t.insert( >::hashed_key_for(bob()), AccountInfo::< - ::Index, + ::Nonce, ::AccountData, >::default() .encode(), @@ -277,7 +277,7 @@ fn successful_execution_with_foreign_code_gives_ok() { let mut t = new_test_ext(bloaty_code_unwrap()); t.insert( >::hashed_key_for(alice()), - AccountInfo::<::Index, _> { + AccountInfo::<::Nonce, _> { providers: 1, data: (111 * DOLLARS, 0u128, 0u128, 1u128 << 127), ..Default::default() @@ -287,7 +287,7 @@ fn successful_execution_with_foreign_code_gives_ok() { t.insert( >::hashed_key_for(bob()), AccountInfo::< - ::Index, + ::Nonce, ::AccountData, >::default() .encode(), @@ -766,7 +766,7 @@ fn panic_execution_gives_error() { let mut t = new_test_ext(bloaty_code_unwrap()); 
t.insert( >::hashed_key_for(alice()), - AccountInfo::<::Index, _> { + AccountInfo::<::Nonce, _> { data: (0 * DOLLARS, 0u128, 0u128, 0u128), ..Default::default() } @@ -795,7 +795,7 @@ fn successful_execution_gives_ok() { let mut t = new_test_ext(compact_code_unwrap()); t.insert( >::hashed_key_for(alice()), - AccountInfo::<::Index, _> { + AccountInfo::<::Nonce, _> { providers: 1, data: (111 * DOLLARS, 0u128, 0u128, 1u128 << 127), ..Default::default() @@ -805,7 +805,7 @@ fn successful_execution_gives_ok() { t.insert( >::hashed_key_for(bob()), AccountInfo::< - ::Index, + ::Nonce, ::AccountData, >::default() .encode(), diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index 970d790a87d3b..7519ce6e8b1b4 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -196,7 +196,7 @@ fn transaction_fee_is_correct() { fn block_weight_capacity_report() { // Just report how many transfer calls you could fit into a block. The number should at least // be a few hundred (250 at the time of writing but can change over time). Runs until panic. - use node_primitives::Index; + use node_primitives::Nonce; // execution ext. let mut t = new_test_ext(compact_code_unwrap()); @@ -205,7 +205,7 @@ fn block_weight_capacity_report() { let factor = 50; let mut time = 10; - let mut nonce: Index = 0; + let mut nonce: Nonce = 0; let mut block_number = 1; let mut previous_hash: node_primitives::Hash = GENESIS_HASH.into(); @@ -213,7 +213,7 @@ fn block_weight_capacity_report() { let num_transfers = block_number * factor; let mut xts = (0..num_transfers) .map(|i| CheckedExtrinsic { - signed: Some((charlie(), signed_extra(nonce + i as Index, 0))), + signed: Some((charlie(), signed_extra(nonce + i as Nonce, 0))), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 0, @@ -266,7 +266,7 @@ fn block_length_capacity_report() { // Just report how big a block can get. Executes until panic. 
Should be ignored unless if // manually inspected. The number should at least be a few megabytes (5 at the time of // writing but can change over time). - use node_primitives::Index; + use node_primitives::Nonce; // execution ext. let mut t = new_test_ext(compact_code_unwrap()); @@ -275,7 +275,7 @@ fn block_length_capacity_report() { let factor = 256 * 1024; let mut time = 10; - let mut nonce: Index = 0; + let mut nonce: Nonce = 0; let mut block_number = 1; let mut previous_hash: node_primitives::Hash = GENESIS_HASH.into(); diff --git a/bin/node/executor/tests/submit_transaction.rs b/bin/node/executor/tests/submit_transaction.rs index b260f90a87466..7678a3c6e5a9f 100644 --- a/bin/node/executor/tests/submit_transaction.rs +++ b/bin/node/executor/tests/submit_transaction.rs @@ -37,7 +37,6 @@ fn should_submit_unsigned_transaction() { pallet_im_online::sr25519::AuthoritySignature::try_from(vec![0; 64]).unwrap(); let heartbeat_data = pallet_im_online::Heartbeat { block_number: 1, - network_state: Default::default(), session_index: 1, authority_index: 0, validators_len: 0, diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index f77c52aa5b056..41894fb60e810 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -14,12 +14,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "4.2.5", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } thiserror = "1.0" sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } -sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } sc-service = { version = "0.10.0-dev", default-features = false, path = "../../../client/service" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" 
} -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } diff --git a/bin/node/inspect/src/command.rs b/bin/node/inspect/src/command.rs index 9702576833ccf..dcecfd7882644 100644 --- a/bin/node/inspect/src/command.rs +++ b/bin/node/inspect/src/command.rs @@ -25,14 +25,12 @@ use crate::{ use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; use sc_service::{Configuration, NativeExecutionDispatch}; use sp_runtime::traits::Block; -use std::str::FromStr; impl InspectCmd { /// Run the inspect command, passing the inspector. pub fn run(&self, config: Configuration) -> Result<()> where B: Block, - B::Hash: FromStr, RA: Send + Sync + 'static, D: NativeExecutionDispatch + 'static, { diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index 5764e0f05c172..65dfecdf77a2d 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -33,13 +33,13 @@ use sp_blockchain::HeaderBackend; use sp_core::hexdisplay::HexDisplay; use sp_runtime::{ generic::BlockId, - traits::{Block, Hash, HashFor, NumberFor}, + traits::{Block, Hash, HashingFor, NumberFor}, }; use std::{fmt, fmt::Debug, marker::PhantomData, str::FromStr}; /// A helper type for a generic block input. pub type BlockAddressFor = - BlockAddress< as Hash>::Output, NumberFor>; + BlockAddress< as Hash>::Output, NumberFor>; /// A Pretty formatter implementation. pub trait PrettyPrinter { @@ -166,7 +166,7 @@ impl> Inspector /// Get a pretty-printed extrinsic. 
pub fn extrinsic( &self, - input: ExtrinsicAddress< as Hash>::Output, NumberFor>, + input: ExtrinsicAddress< as Hash>::Output, NumberFor>, ) -> Result { struct ExtrinsicPrinter<'a, A: Block, B>(A::Extrinsic, &'a B); impl<'a, A: Block, B: PrettyPrinter> fmt::Display for ExtrinsicPrinter<'a, A, B> { diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index 78cbf67ce7710..ac178c67f7188 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -13,22 +13,12 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ - "derive", -] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../../primitives/application-crypto" } -sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } [features] default = ["std"] std = [ - "codec/std", - "frame-system/std", - "scale-info/std", - "sp-application-crypto/std", "sp-core/std", "sp-runtime/std", ] diff --git a/bin/node/primitives/src/lib.rs b/bin/node/primitives/src/lib.rs index e2fa5c3108149..24a67cbdd8f78 100644 --- a/bin/node/primitives/src/lib.rs +++ b/bin/node/primitives/src/lib.rs @@ -46,7 +46,7 @@ pub type Balance = u128; pub type Moment = u64; /// Index of a transaction in the chain. -pub type Index = u32; +pub type Nonce = u32; /// A hash of some data used by the chain. 
pub type Hash = sp_core::H256; diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 8a336242cd267..9f5d12e22d372 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -33,8 +33,8 @@ sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-bu sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } -sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-keystore = { version = "0.27.0", path = "../../../primitives/keystore" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } sp-statement-store = { version = "4.0.0-dev", path = "../../../primitives/statement-store" } substrate-frame-rpc-system = { version = "4.0.0-dev", path = "../../../utils/frame/rpc/system" } substrate-state-trie-migration-rpc = { version = "4.0.0-dev", path = "../../../utils/frame/rpc/state-trie-migration-rpc/" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 5ab96bf1c7064..6d8aa5ff0a9da 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -34,7 +34,7 @@ use std::sync::Arc; use jsonrpsee::RpcModule; -use node_primitives::{AccountId, Balance, Block, BlockNumber, Hash, Index}; +use node_primitives::{AccountId, Balance, Block, BlockNumber, Hash, Nonce}; use sc_client_api::AuxStore; use sc_consensus_babe::BabeWorkerHandle; use sc_consensus_grandpa::{ @@ -90,12 +90,23 @@ pub struct FullDeps { pub grandpa: GrandpaDeps, /// Shared statement store reference. pub statement_store: Arc, + /// The backend used by the node. + pub backend: Arc, } /// Instantiate all Full RPC extensions. 
pub fn create_full( - deps: FullDeps, - backend: Arc, + FullDeps { + client, + pool, + select_chain, + chain_spec, + deny_unsafe, + babe, + grandpa, + statement_store, + backend, + }: FullDeps, ) -> Result, Box> where C: ProvideRuntimeApi @@ -106,7 +117,7 @@ where + Sync + Send + 'static, - C::Api: substrate_frame_rpc_system::AccountNonceApi, + C::Api: substrate_frame_rpc_system::AccountNonceApi, C::Api: mmr_rpc::MmrRuntimeApi::Hash, BlockNumber>, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, @@ -114,7 +125,7 @@ where P: TransactionPool + 'static, SC: SelectChain + 'static, B: sc_client_api::Backend + Send + Sync + 'static, - B::State: sc_client_api::backend::StateBackend>, + B::State: sc_client_api::backend::StateBackend>, { use mmr_rpc::{Mmr, MmrApiServer}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; @@ -130,16 +141,6 @@ where use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; let mut io = RpcModule::new(()); - let FullDeps { - client, - pool, - select_chain, - chain_spec, - deny_unsafe, - babe, - grandpa, - statement_store, - } = deps; let BabeDeps { keystore, babe_worker_handle } = babe; let GrandpaDeps { @@ -159,7 +160,15 @@ where // Making synchronous calls in light client freezes the browser currently, // more context: https://github.com/paritytech/substrate/pull/3480 // These RPCs should use an asynchronous caller instead. 
- io.merge(Mmr::new(client.clone()).into_rpc())?; + io.merge( + Mmr::new( + client.clone(), + backend + .offchain_storage() + .ok_or_else(|| "Backend doesn't provide an offchain storage")?, + ) + .into_rpc(), + )?; io.merge(TransactionPayment::new(client.clone()).into_rpc())?; io.merge( Babe::new(client.clone(), babe_worker_handle.clone(), keystore, select_chain, deny_unsafe) diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 3909eff2af4f0..de0ec1a5489a8 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len", ] } @@ -24,6 +24,9 @@ scale-info = { version = "2.5.0", default-features = false, features = ["derive" static_assertions = "1.1.0" log = { version = "0.4.17", default-features = false } +# pallet-asset-conversion: turn on "num-traits" feature +primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info", "num-traits"] } + # primitives sp-authority-discovery = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/authority-discovery" } sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/consensus/babe" } @@ -32,16 +35,16 @@ sp-block-builder = { path = "../../../primitives/block-builder", default-feature sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/inherents" } node-primitives = { version = "2.0.0", default-features = false, path = "../primitives" } sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } -sp-core = { version = "7.0.0", default-features = false, path = 
"../../../primitives/core" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/transaction-pool" } sp-statement-store = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/statement-store" } -sp-version = { version = "5.0.0", default-features = false, path = "../../../primitives/version" } -sp-io = { version = "7.0.0", default-features = false, path = "../../../primitives/io" } +sp-version = { version = "22.0.0", default-features = false, path = "../../../primitives/version" } +sp-io = { version = "23.0.0", default-features = false, path = "../../../primitives/io" } # frame dependencies frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } @@ -54,6 +57,7 @@ frame-election-provider-support = { version = "4.0.0-dev", default-features = fa frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../../../frame/try-runtime", optional = true } pallet-alliance = { version = "4.0.0-dev", default-features = false, path = 
"../../../frame/alliance" } +pallet-asset-conversion = { version = "4.0.0-dev", default-features = false, path = "../../../frame/asset-conversion" } pallet-asset-rate = { version = "4.0.0-dev", default-features = false, path = "../../../frame/asset-rate" } pallet-assets = { version = "4.0.0-dev", default-features = false, path = "../../../frame/assets" } pallet-authority-discovery = { version = "4.0.0-dev", default-features = false, path = "../../../frame/authority-discovery" } @@ -65,7 +69,7 @@ pallet-bounties = { version = "4.0.0-dev", default-features = false, path = "../ pallet-child-bounties = { version = "4.0.0-dev", default-features = false, path = "../../../frame/child-bounties" } pallet-collective = { version = "4.0.0-dev", default-features = false, path = "../../../frame/collective" } pallet-contracts = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts" } -pallet-contracts-primitives = { version = "7.0.0", default-features = false, path = "../../../frame/contracts/primitives/" } +pallet-contracts-primitives = { version = "24.0.0", default-features = false, path = "../../../frame/contracts/primitives/" } pallet-conviction-voting = { version = "4.0.0-dev", default-features = false, path = "../../../frame/conviction-voting" } pallet-core-fellowship = { version = "4.0.0-dev", default-features = false, path = "../../../frame/core-fellowship" } pallet-democracy = { version = "4.0.0-dev", default-features = false, path = "../../../frame/democracy" } @@ -85,6 +89,7 @@ pallet-mmr = { version = "4.0.0-dev", default-features = false, path = "../../.. 
pallet-multisig = { version = "4.0.0-dev", default-features = false, path = "../../../frame/multisig" } pallet-nfts = { version = "4.0.0-dev", default-features = false, path = "../../../frame/nfts" } pallet-nfts-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/nfts/runtime-api" } +pallet-nft-fractionalization = { version = "4.0.0-dev", default-features = false, path = "../../../frame/nft-fractionalization" } pallet-nomination-pools = { version = "1.0.0", default-features = false, path = "../../../frame/nomination-pools"} pallet-nomination-pools-benchmarking = { version = "1.0.0", default-features = false, optional = true, path = "../../../frame/nomination-pools/benchmarking" } pallet-nomination-pools-runtime-api = { version = "1.0.0-dev", default-features = false, path = "../../../frame/nomination-pools/runtime-api" } @@ -116,7 +121,8 @@ pallet-treasury = { version = "4.0.0-dev", default-features = false, path = "../ pallet-utility = { version = "4.0.0-dev", default-features = false, path = "../../../frame/utility" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } -pallet-asset-tx-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment/asset-tx-payment/" } +pallet-asset-conversion-tx-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } +pallet-asset-tx-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment/asset-tx-payment" } pallet-transaction-storage = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-storage" } pallet-uniques = { version = "4.0.0-dev", default-features = false, 
path = "../../../frame/uniques" } pallet-vesting = { version = "4.0.0-dev", default-features = false, path = "../../../frame/vesting" } @@ -132,10 +138,12 @@ std = [ "pallet-whitelist/std", "pallet-offences-benchmarking?/std", "pallet-election-provider-support-benchmarking?/std", + "pallet-asset-conversion-tx-payment/std", "pallet-asset-tx-payment/std", "frame-system-benchmarking?/std", "frame-election-provider-support/std", "sp-authority-discovery/std", + "pallet-asset-conversion/std", "pallet-assets/std", "pallet-authority-discovery/std", "pallet-authorship/std", @@ -219,6 +227,7 @@ std = [ "pallet-uniques/std", "pallet-nfts/std", "pallet-nfts-runtime-api/std", + "pallet-nft-fractionalization/std", "pallet-vesting/std", "log/std", "frame-try-runtime?/std", @@ -234,6 +243,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "pallet-alliance/runtime-benchmarks", + "pallet-asset-conversion/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-babe/runtime-benchmarks", "pallet-bags-list/runtime-benchmarks", @@ -283,9 +293,15 @@ runtime-benchmarks = [ "pallet-utility/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", "pallet-nfts/runtime-benchmarks", + "pallet-nft-fractionalization/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "pallet-whitelist/runtime-benchmarks", "frame-system-benchmarking/runtime-benchmarks", + "frame-election-provider-support/runtime-benchmarks", + "pallet-asset-tx-payment/runtime-benchmarks", + "pallet-nomination-pools/runtime-benchmarks", + "pallet-offences/runtime-benchmarks", + "sp-staking/runtime-benchmarks" ] try-runtime = [ "frame-try-runtime/try-runtime", @@ -294,6 +310,7 @@ try-runtime = [ "frame-system/try-runtime", "frame-support/try-runtime", "pallet-alliance/try-runtime", + "pallet-asset-conversion/try-runtime", "pallet-assets/try-runtime", "pallet-authority-discovery/try-runtime", "pallet-authorship/try-runtime", @@ -345,10 +362,15 @@ try-runtime = [ 
"pallet-asset-rate/try-runtime", "pallet-utility/try-runtime", "pallet-transaction-payment/try-runtime", + "pallet-asset-conversion-tx-payment/try-runtime", "pallet-asset-tx-payment/try-runtime", "pallet-transaction-storage/try-runtime", "pallet-uniques/try-runtime", "pallet-nfts/try-runtime", + "pallet-nft-fractionalization/try-runtime", "pallet-vesting/try-runtime", "pallet-whitelist/try-runtime", + "frame-election-provider-support/try-runtime", + "sp-runtime/try-runtime" ] +unsafe-debug = ["pallet-contracts/unsafe-debug"] diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 05531f47c6e05..430a1ac2824b8 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -17,10 +17,6 @@ //! Some configurable implementations as associated type for the substrate runtime. -use crate::{ - AccountId, AllianceMotion, Assets, Authorship, Balances, Hash, NegativeImbalance, Runtime, - RuntimeCall, -}; use frame_support::{ pallet_prelude::*, traits::{ @@ -32,6 +28,11 @@ use pallet_alliance::{IdentityVerifier, ProposalIndex, ProposalProvider}; use pallet_asset_tx_payment::HandleCredit; use sp_std::prelude::*; +use crate::{ + AccountId, AllianceMotion, Assets, Authorship, Balances, Hash, NegativeImbalance, Runtime, + RuntimeCall, +}; + pub struct Author; impl OnUnbalanced for Author { fn on_nonzero_unbalanced(amount: NegativeImbalance) { @@ -111,11 +112,15 @@ impl ProposalProvider for AllianceProposalProvider #[cfg(test)] mod multiplier_tests { + use frame_support::{ + dispatch::DispatchClass, + weights::{Weight, WeightToFee}, + }; use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment}; use sp_runtime::{ assert_eq_error_rate, traits::{Convert, One, Zero}, - FixedPointNumber, + BuildStorage, FixedPointNumber, }; use crate::{ @@ -123,10 +128,6 @@ mod multiplier_tests { AdjustmentVariable, MaximumMultiplier, MinimumMultiplier, Runtime, RuntimeBlockWeights as BlockWeights, System, TargetBlockFullness, TransactionPayment, }; - 
use frame_support::{ - dispatch::DispatchClass, - weights::{Weight, WeightToFee}, - }; fn max_normal() -> Weight { BlockWeights::get() @@ -161,14 +162,28 @@ mod multiplier_tests { // bump if it is zero. let previous_float = previous_float.max(min_multiplier().into_inner() as f64 / accuracy); + let max_normal = max_normal(); + let target_weight = target(); + let normalized_weight_dimensions = ( + block_weight.ref_time() as f64 / max_normal.ref_time() as f64, + block_weight.proof_size() as f64 / max_normal.proof_size() as f64, + ); + + let (normal, max, target) = + if normalized_weight_dimensions.0 < normalized_weight_dimensions.1 { + (block_weight.proof_size(), max_normal.proof_size(), target_weight.proof_size()) + } else { + (block_weight.ref_time(), max_normal.ref_time(), target_weight.ref_time()) + }; + // maximum tx weight - let m = max_normal().ref_time() as f64; + let m = max as f64; // block weight always truncated to max weight - let block_weight = (block_weight.ref_time() as f64).min(m); + let block_weight = (normal as f64).min(m); let v: f64 = AdjustmentVariable::get().to_float(); // Ideal saturation in terms of weight - let ss = target().ref_time() as f64; + let ss = target as f64; // Current saturation in terms of weight let s = block_weight; @@ -182,8 +197,8 @@ mod multiplier_tests { where F: Fn() -> (), { - let mut t: sp_io::TestExternalities = frame_system::GenesisConfig::default() - .build_storage::() + let mut t: sp_io::TestExternalities = frame_system::GenesisConfig::::default() + .build_storage() .unwrap() .into(); t.execute_with(|| { @@ -218,10 +233,16 @@ mod multiplier_tests { #[test] fn multiplier_can_grow_from_zero() { // if the min is too small, then this will not change, and we are doomed forever. - // the weight is 1/100th bigger than target. + // the block ref time is 1/100th bigger than target. 
run_with_system_weight(target().set_ref_time(target().ref_time() * 101 / 100), || { let next = runtime_multiplier_update(min_multiplier()); - assert!(next > min_multiplier(), "{:?} !>= {:?}", next, min_multiplier()); + assert!(next > min_multiplier(), "{:?} !> {:?}", next, min_multiplier()); + }); + + // the block proof size is 1/100th bigger than target. + run_with_system_weight(target().set_proof_size((target().proof_size() / 100) * 101), || { + let next = runtime_multiplier_update(min_multiplier()); + assert!(next > min_multiplier(), "{:?} !> {:?}", next, min_multiplier()); }) } @@ -407,23 +428,33 @@ mod multiplier_tests { #[test] fn weight_to_fee_should_not_overflow_on_large_weights() { - let kb = Weight::from_parts(1024, 0); - let mb = 1024u64 * kb; + let kb_time = Weight::from_parts(1024, 0); + let kb_size = Weight::from_parts(0, 1024); + let mb_time = 1024u64 * kb_time; let max_fm = Multiplier::saturating_from_integer(i128::MAX); // check that for all values it can compute, correctly. vec![ Weight::zero(), + // testcases ignoring proof size part of the weight. Weight::from_parts(1, 0), Weight::from_parts(10, 0), Weight::from_parts(1000, 0), - kb, - 10u64 * kb, - 100u64 * kb, - mb, - 10u64 * mb, + kb_time, + 10u64 * kb_time, + 100u64 * kb_time, + mb_time, + 10u64 * mb_time, Weight::from_parts(2147483647, 0), Weight::from_parts(4294967295, 0), + // testcases ignoring ref time part of the weight. + Weight::from_parts(0, 100000000000), + 1000000u64 * kb_size, + 1000000000u64 * kb_size, + Weight::from_parts(0, 18014398509481983), + Weight::from_parts(0, 9223372036854775807), + // test cases with both parts of the weight. + BlockWeights::get().max_block / 1024, BlockWeights::get().max_block / 2, BlockWeights::get().max_block, Weight::MAX / 2, @@ -440,7 +471,14 @@ mod multiplier_tests { // Some values that are all above the target and will cause an increase. 
let t = target(); - vec![t + Weight::from_parts(100, 0), t * 2, t * 4].into_iter().for_each(|i| { + vec![ + t + Weight::from_parts(100, 0), + t + Weight::from_parts(0, t.proof_size() * 2), + t * 2, + t * 4, + ] + .into_iter() + .for_each(|i| { run_with_system_weight(i, || { let fm = runtime_multiplier_update(max_fm); // won't grow. The convert saturates everything. diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index a2cb64cadd7c5..4e1b6d4e8bec0 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -19,16 +19,19 @@ //! The Substrate runtime. This can be compiled with `#[no_std]`, ready for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 512. -#![recursion_limit = "512"] +// `construct_runtime!` does a lot of recursion and requires us to increase the limits. +#![recursion_limit = "1024"] use codec::{Decode, Encode, MaxEncodedLen}; use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, onchain, BalancingConfig, ElectionDataProvider, SequentialPhragmen, VoteWeight, }; use frame_support::{ construct_runtime, dispatch::DispatchClass, + instances::{Instance1, Instance2}, + ord_parameter_types, pallet_prelude::Get, parameter_types, traits::{ @@ -36,7 +39,7 @@ use frame_support::{ tokens::{nonfungibles_v2::Inspect, GetSalary, PayFromAccount}, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, - LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, WithdrawReasons, + LockIdentifier, Nothing, OnUnbalanced, WithdrawReasons, }, weights::{ constants::{ @@ -44,14 +47,15 @@ use frame_support::{ }, ConstantMultiplier, IdentityFee, Weight, }, - PalletId, RuntimeDebug, + BoundedVec, PalletId, RuntimeDebug, }; use frame_system::{ limits::{BlockLength, BlockWeights}, - EnsureRoot, 
EnsureRootWithSuccess, EnsureSigned, EnsureWithSuccess, + EnsureRoot, EnsureRootWithSuccess, EnsureSigned, EnsureSignedBy, EnsureWithSuccess, }; pub use node_primitives::{AccountId, Signature}; -use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment}; +use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Moment, Nonce}; +use pallet_asset_conversion::{NativeOrAssetId, NativeOrAssetIdConverter}; use pallet_election_provider_multi_phase::SolutionAccuracyOf; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_nfts::PalletFeatures; @@ -59,7 +63,6 @@ use pallet_nis::WithMaximumOf; use pallet_session::historical as pallet_session_historical; pub use pallet_transaction_payment::{CurrencyAdapter, Multiplier, TargetedFeeAdjustment}; use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; -use scale_info::TypeInfo; use sp_api::impl_runtime_apis; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_grandpa::AuthorityId as GrandpaId; @@ -70,8 +73,8 @@ use sp_runtime::{ curve::PiecewiseLinear, generic, impl_opaque_keys, traits::{ - self, BlakeTwo256, Block as BlockT, Bounded, ConvertInto, NumberFor, OpaqueKeys, - SaturatedConversion, StaticLookup, + self, AccountIdConversion, BlakeTwo256, Block as BlockT, Bounded, ConvertInto, NumberFor, + OpaqueKeys, SaturatedConversion, StaticLookup, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedPointNumber, FixedU128, Perbill, Percent, Permill, Perquintill, @@ -222,13 +225,12 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = Index; - type BlockNumber = BlockNumber; + type Nonce = Nonce; type Hash = Hash; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = Indices; - type Header = generic::Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type 
BlockHashCount = BlockHashCount; type Version = Version; @@ -324,8 +326,9 @@ impl InstanceFilter for ProxyType { RuntimeCall::Elections(..) | RuntimeCall::Treasury(..) ), - ProxyType::Staking => - matches!(c, RuntimeCall::Staking(..) | RuntimeCall::FastUnstake(..)), + ProxyType::Staking => { + matches!(c, RuntimeCall::Staking(..) | RuntimeCall::FastUnstake(..)) + }, } } fn is_superset(&self, o: &Self) -> bool { @@ -377,6 +380,7 @@ impl pallet_scheduler::Config for Runtime { impl pallet_glutton::Config for Runtime { type RuntimeEvent = RuntimeEvent; + type AdminOrigin = EnsureRoot; type WeightInfo = pallet_glutton::weights::SubstrateWeight; } @@ -412,6 +416,7 @@ impl pallet_babe::Config for Runtime { type DisabledValidators = Session; type WeightInfo = (); type MaxAuthorities = MaxAuthorities; + type MaxNominators = MaxNominatorRewardedPerValidator; type KeyOwnerProof = >::Proof; type EquivocationReportSystem = @@ -438,15 +443,6 @@ parameter_types! { pub const MaxReserves: u32 = 50; } -/// A reason for placing a hold on funds. -#[derive( - Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, Debug, TypeInfo, -)] -pub enum HoldReason { - /// The NIS Pallet has reserved it for a non-fungible receipt. - Nis, -} - impl pallet_balances::Config for Runtime { type MaxLocks = MaxLocks; type MaxReserves = MaxReserves; @@ -459,8 +455,8 @@ impl pallet_balances::Config for Runtime { type WeightInfo = pallet_balances::weights::SubstrateWeight; type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = HoldReason; - type MaxHolds = ConstU32<1>; + type RuntimeHoldReason = RuntimeHoldReason; + type MaxHolds = ConstU32<2>; } parameter_types! 
{ @@ -491,11 +487,18 @@ impl pallet_asset_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; type OnChargeAssetTransaction = pallet_asset_tx_payment::FungiblesAdapter< - pallet_assets::BalanceToAssetBalance, + pallet_assets::BalanceToAssetBalance, CreditToBlockAuthor, >; } +impl pallet_asset_conversion_tx_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Fungibles = Assets; + type OnChargeAssetTransaction = + pallet_asset_conversion_tx_payment::AssetConversionAdapter; +} + parameter_types! { pub const MinimumPeriod: Moment = SLOT_DURATION / 2; } @@ -560,6 +563,9 @@ parameter_types! { pub HistoryDepth: u32 = 84; } +/// Upper limit on the number of NPOS nominations. +const MAX_QUOTA_NOMINATIONS: u32 = 16; + pub struct StakingBenchmarkingConfig; impl pallet_staking::BenchmarkingConfig for StakingBenchmarkingConfig { type MaxNominators = ConstU32<1000>; @@ -567,11 +573,10 @@ impl pallet_staking::BenchmarkingConfig for StakingBenchmarkingConfig { } impl pallet_staking::Config for Runtime { - type MaxNominations = MaxNominations; type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; - type CurrencyToVote = U128CurrencyToVote; + type CurrencyToVote = sp_staking::currency_to_vote::U128CurrencyToVote; type RewardRemainder = Treasury; type RuntimeEvent = RuntimeEvent; type Slash = Treasury; // send the slashed funds to the treasury. 
@@ -592,11 +597,12 @@ impl pallet_staking::Config for Runtime { type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; + type NominationsQuota = pallet_staking::FixedNominationsQuota; // This a placeholder, to be introduced in the next PR as an instance of bags-list type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; type HistoryDepth = HistoryDepth; - type OnStakerSlash = NominationPools; + type EventListeners = NominationPools; type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = StakingBenchmarkingConfig; } @@ -645,17 +651,20 @@ frame_election_provider_support::generate_solution_type!( VoterIndex = u32, TargetIndex = u16, Accuracy = sp_runtime::PerU16, - MaxVoters = MaxElectingVoters, + MaxVoters = MaxElectingVotersSolution, >(16) ); parameter_types! { + // Note: the EPM in this runtime runs the election on-chain. The election bounds must be + // carefully set so that an election round fits in one block. + pub ElectionBoundsMultiPhase: ElectionBounds = ElectionBoundsBuilder::default() + .voters_count(10_000.into()).targets_count(1_500.into()).build(); + pub ElectionBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default() + .voters_count(5_000.into()).targets_count(1_250.into()).build(); + pub MaxNominations: u32 = ::LIMIT as u32; - pub MaxElectingVoters: u32 = 40_000; - pub MaxElectableTargets: u16 = 10_000; - // OnChain values are lower. - pub MaxOnChainElectingVoters: u32 = 5000; - pub MaxOnChainElectableTargets: u16 = 1250; + pub MaxElectingVotersSolution: u32 = 40_000; // The maximum winners that can be elected by the Election pallet which is equivalent to the // maximum active validators the staking pallet can have. 
pub MaxActiveValidators: u32 = 1000; @@ -710,8 +719,7 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = ::DataProvider; type WeightInfo = frame_election_provider_support::weights::SubstrateWeight; type MaxWinners = ::MaxWinners; - type VotersBound = MaxOnChainElectingVoters; - type TargetsBound = MaxOnChainElectableTargets; + type Bounds = ElectionBoundsOnChain; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -759,9 +767,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type GovernanceFallback = onchain::OnChainExecution; type Solver = SequentialPhragmen, OffchainRandomBalancing>; type ForceOrigin = EnsureRootOrHalfCouncil; - type MaxElectableTargets = MaxElectableTargets; type MaxWinners = MaxActiveValidators; - type MaxElectingVoters = MaxElectingVoters; + type ElectionBounds = ElectionBoundsMultiPhase; type BenchmarkingConfig = ElectionProviderBenchmarkConfig; type WeightInfo = pallet_election_provider_multi_phase::weights::SubstrateWeight; } @@ -1043,7 +1050,7 @@ impl pallet_elections_phragmen::Config for Runtime { // NOTE: this implies that council's genesis members cannot be set directly and must come from // this module. type InitializeMembers = Council; - type CurrencyToVote = U128CurrencyToVote; + type CurrencyToVote = sp_staking::currency_to_vote::U128CurrencyToVote; type CandidacyBond = CandidacyBond; type VotingBondBase = VotingBondBase; type VotingBondFactor = VotingBondFactor; @@ -1139,11 +1146,12 @@ impl pallet_asset_rate::Config for Runtime { type CreateOrigin = EnsureRoot; type RemoveOrigin = EnsureRoot; type UpdateOrigin = EnsureRoot; - type Balance = Balance; type Currency = Balances; - type AssetId = u32; + type AssetKind = u32; type RuntimeEvent = RuntimeEvent; type WeightInfo = pallet_asset_rate::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); } parameter_types! 
{ @@ -1186,6 +1194,7 @@ impl pallet_message_queue::Config for Runtime { type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor; type Size = u32; type QueueChangeHandler = (); + type QueuePausedQuery = (); type HeapSize = ConstU32<{ 64 * 1024 }>; type MaxStale = ConstU32<128>; type ServiceWeight = MessageQueueServiceWeight; @@ -1218,6 +1227,7 @@ parameter_types! { pub const DepositPerByte: Balance = deposit(0, 1); pub const DefaultDepositLimit: Balance = deposit(1024, 1024 * 1024); pub Schedule: pallet_contracts::Schedule = Default::default(); + pub CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(30); } impl pallet_contracts::Config for Runtime { @@ -1246,6 +1256,15 @@ impl pallet_contracts::Config for Runtime { type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = ConstBool; type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; + type RuntimeHoldReason = RuntimeHoldReason; + #[cfg(not(feature = "runtime-benchmarks"))] + type Migrations = (); + #[cfg(feature = "runtime-benchmarks")] + type Migrations = pallet_contracts::migration::codegen::BenchMigrations; + type MaxDelegateDependencies = ConstU32<32>; + type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; + #[cfg(feature = "unsafe-debug")] + type Debug = (); } impl pallet_sudo::Config for Runtime { @@ -1261,7 +1280,6 @@ parameter_types! { pub const MaxAuthorities: u32 = 100; pub const MaxKeys: u32 = 10_000; pub const MaxPeerInHeartbeats: u32 = 10_000; - pub const MaxPeerDataEncodingSize: u32 = 1_000; } impl frame_system::offchain::CreateSignedTransaction for Runtime @@ -1272,7 +1290,7 @@ where call: RuntimeCall, public: ::Signer, account: AccountId, - nonce: Index, + nonce: Nonce, ) -> Option<(RuntimeCall, ::SignaturePayload)> { let tip = 0; // take the biggest period possible. 
@@ -1292,7 +1310,7 @@ where frame_system::CheckEra::::from(era), frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), - pallet_asset_tx_payment::ChargeAssetTxPayment::::from(tip, None), + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from(tip, None), ); let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { @@ -1329,7 +1347,6 @@ impl pallet_im_online::Config for Runtime { type WeightInfo = pallet_im_online::weights::SubstrateWeight; type MaxKeys = MaxKeys; type MaxPeerInHeartbeats = MaxPeerInHeartbeats; - type MaxPeerDataEncodingSize = MaxPeerDataEncodingSize; } impl pallet_offences::Config for Runtime { @@ -1350,6 +1367,7 @@ impl pallet_grandpa::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type MaxAuthorities = MaxAuthorities; + type MaxNominators = MaxNominatorRewardedPerValidator; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; type KeyOwnerProof = >::Proof; type EquivocationReportSystem = @@ -1399,14 +1417,14 @@ impl pallet_recovery::Config for Runtime { } parameter_types! 
{ - pub const CandidateDeposit: Balance = 10 * DOLLARS; - pub const WrongSideDeduction: Balance = 2 * DOLLARS; - pub const MaxStrikes: u32 = 10; - pub const RotationPeriod: BlockNumber = 80 * HOURS; + pub const GraceStrikes: u32 = 10; + pub const SocietyVotingPeriod: BlockNumber = 80 * HOURS; + pub const ClaimPeriod: BlockNumber = 80 * HOURS; pub const PeriodSpend: Balance = 500 * DOLLARS; pub const MaxLockDuration: BlockNumber = 36 * 30 * DAYS; pub const ChallengePeriod: BlockNumber = 7 * DAYS; - pub const MaxCandidateIntake: u32 = 10; + pub const MaxPayouts: u32 = 10; + pub const MaxBids: u32 = 10; pub const SocietyPalletId: PalletId = PalletId(*b"py/socie"); } @@ -1415,18 +1433,17 @@ impl pallet_society::Config for Runtime { type PalletId = SocietyPalletId; type Currency = Balances; type Randomness = RandomnessCollectiveFlip; - type CandidateDeposit = CandidateDeposit; - type WrongSideDeduction = WrongSideDeduction; - type MaxStrikes = MaxStrikes; + type GraceStrikes = GraceStrikes; type PeriodSpend = PeriodSpend; - type MembershipChanged = (); - type RotationPeriod = RotationPeriod; + type VotingPeriod = SocietyVotingPeriod; + type ClaimPeriod = ClaimPeriod; type MaxLockDuration = MaxLockDuration; type FounderSetOrigin = pallet_collective::EnsureProportionMoreThan; - type SuspensionJudgementOrigin = pallet_society::EnsureFounder; - type MaxCandidateIntake = MaxCandidateIntake; type ChallengePeriod = ChallengePeriod; + type MaxPayouts = MaxPayouts; + type MaxBids = MaxBids; + type WeightInfo = pallet_society::weights::SubstrateWeight; } parameter_types! { @@ -1450,7 +1467,6 @@ impl pallet_vesting::Config for Runtime { impl pallet_mmr::Config for Runtime { const INDEXING_PREFIX: &'static [u8] = b"mmr"; type Hashing = ::Hashing; - type Hash = ::Hash; type LeafData = pallet_mmr::ParentNumberAndHash; type OnNewRoot = (); type WeightInfo = (); @@ -1483,7 +1499,7 @@ parameter_types! 
{ pub const MetadataDepositPerByte: Balance = 1 * DOLLARS; } -impl pallet_assets::Config for Runtime { +impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = u128; type AssetId = u32; @@ -1506,6 +1522,66 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = (); } +ord_parameter_types! { + pub const AssetConversionOrigin: AccountId = AccountIdConversion::::into_account_truncating(&AssetConversionPalletId::get()); +} + +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = u128; + type AssetId = u32; + type AssetIdParameter = codec::Compact; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = EnsureRoot; + type AssetDeposit = AssetDeposit; + type AssetAccountDeposit = ConstU128; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type ApprovalDeposit = ApprovalDeposit; + type StringLimit = StringLimit; + type Freezer = (); + type Extra = (); + type WeightInfo = pallet_assets::weights::SubstrateWeight; + type RemoveItemsLimit = ConstU32<1000>; + type CallbackHandle = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +parameter_types! { + pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); + pub AllowMultiAssetPools: bool = true; + pub const PoolSetupFee: Balance = 1 * DOLLARS; // should be more or equal to the existential deposit + pub const MintMinLiquidity: Balance = 100; // 100 is good enough when the main currency has 10-12 decimals. + pub const LiquidityWithdrawalFee: Permill = Permill::from_percent(0); // should be non-zero if AllowMultiAssetPools is true, otherwise can be zero. 
+} + +impl pallet_asset_conversion::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type AssetBalance = ::Balance; + type HigherPrecisionBalance = sp_core::U256; + type Assets = Assets; + type Balance = u128; + type PoolAssets = PoolAssets; + type AssetId = >::AssetId; + type MultiAssetId = NativeOrAssetId; + type PoolAssetId = >::AssetId; + type PalletId = AssetConversionPalletId; + type LPFee = ConstU32<3>; // means 0.3% + type PoolSetupFee = PoolSetupFee; + type PoolSetupFeeReceiver = AssetConversionOrigin; + type LiquidityWithdrawalFee = LiquidityWithdrawalFee; + type WeightInfo = pallet_asset_conversion::weights::SubstrateWeight; + type AllowMultiAssetPools = AllowMultiAssetPools; + type MaxSwapPathLength = ConstU32<4>; + type MintMinLiquidity = MintMinLiquidity; + type MultiAssetIdConverter = NativeOrAssetIdConverter; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + parameter_types! { pub const QueueCount: u32 = 300; pub const MaxQueueLen: u32 = 1000; @@ -1518,7 +1594,6 @@ parameter_types! { pub const ThawThrottle: (Perquintill, BlockNumber) = (Perquintill::from_percent(25), 5); pub Target: Perquintill = Perquintill::zero(); pub const NisPalletId: PalletId = PalletId(*b"py/nis "); - pub const NisHoldReason: HoldReason = HoldReason::Nis; } impl pallet_nis::Config for Runtime { @@ -1542,14 +1617,12 @@ impl pallet_nis::Config for Runtime { type IntakePeriod = IntakePeriod; type MaxIntakeWeight = MaxIntakeWeight; type ThawThrottle = ThawThrottle; - type HoldReason = NisHoldReason; + type RuntimeHoldReason = RuntimeHoldReason; } parameter_types! 
{ pub const CollectionDeposit: Balance = 100 * DOLLARS; pub const ItemDeposit: Balance = 1 * DOLLARS; - pub const KeyLimit: u32 = 32; - pub const ValueLimit: u32 = 256; pub const ApprovalsLimit: u32 = 20; pub const ItemAttributesApprovalsLimit: u32 = 20; pub const MaxTips: u32 = 10; @@ -1567,9 +1640,9 @@ impl pallet_uniques::Config for Runtime { type MetadataDepositBase = MetadataDepositBase; type AttributeDepositBase = MetadataDepositBase; type DepositPerByte = MetadataDepositPerByte; - type StringLimit = StringLimit; - type KeyLimit = KeyLimit; - type ValueLimit = ValueLimit; + type StringLimit = ConstU32<128>; + type KeyLimit = ConstU32<32>; + type ValueLimit = ConstU32<64>; type WeightInfo = pallet_uniques::weights::SubstrateWeight; #[cfg(feature = "runtime-benchmarks")] type Helper = (); @@ -1612,6 +1685,32 @@ impl pallet_core_fellowship::Config for Runtime { type EvidenceSize = ConstU32<16_384>; } +parameter_types! { + pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); + pub NewAssetSymbol: BoundedVec = (*b"FRAC").to_vec().try_into().unwrap(); + pub NewAssetName: BoundedVec = (*b"Frac").to_vec().try_into().unwrap(); +} + +impl pallet_nft_fractionalization::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Deposit = AssetDeposit; + type Currency = Balances; + type NewAssetSymbol = NewAssetSymbol; + type NewAssetName = NewAssetName; + type StringLimit = StringLimit; + type NftCollectionId = ::CollectionId; + type NftId = ::ItemId; + type AssetBalance = ::Balance; + type AssetId = >::AssetId; + type Assets = Assets; + type Nfts = Nfts; + type PalletId = NftFractionalizationPalletId; + type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; + type RuntimeHoldReason = RuntimeHoldReason; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + parameter_types! 
{ pub Features: PalletFeatures = PalletFeatures::all_enabled(); pub const MaxAttributesPerCall: u32 = 10; @@ -1628,9 +1727,9 @@ impl pallet_nfts::Config for Runtime { type MetadataDepositBase = MetadataDepositBase; type AttributeDepositBase = MetadataDepositBase; type DepositPerByte = MetadataDepositPerByte; - type StringLimit = StringLimit; - type KeyLimit = KeyLimit; - type ValueLimit = ValueLimit; + type StringLimit = ConstU32<256>; + type KeyLimit = ConstU32<64>; + type ValueLimit = ConstU32<256>; type ApprovalsLimit = ApprovalsLimit; type ItemAttributesApprovalsLimit = ItemAttributesApprovalsLimit; type MaxTips = MaxTips; @@ -1778,10 +1877,7 @@ impl pallet_statement::Config for Runtime { } construct_runtime!( - pub struct Runtime where - Block = Block, - NodeBlock = node_primitives::Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { System: frame_system, Utility: pallet_utility, @@ -1794,6 +1890,7 @@ construct_runtime!( Balances: pallet_balances, TransactionPayment: pallet_transaction_payment, AssetTxPayment: pallet_asset_tx_payment, + AssetConversionTxPayment: pallet_asset_conversion_tx_payment, ElectionProviderMultiPhase: pallet_election_provider_multi_phase, Staking: pallet_staking, Session: pallet_session, @@ -1823,12 +1920,14 @@ construct_runtime!( Multisig: pallet_multisig, Bounties: pallet_bounties, Tips: pallet_tips, - Assets: pallet_assets, + Assets: pallet_assets::, + PoolAssets: pallet_assets::, Mmr: pallet_mmr, Lottery: pallet_lottery, Nis: pallet_nis, Uniques: pallet_uniques, Nfts: pallet_nfts, + NftFractionalization: pallet_nft_fractionalization, Salary: pallet_salary, CoreFellowship: pallet_core_fellowship, TransactionStorage: pallet_transaction_storage, @@ -1845,6 +1944,7 @@ construct_runtime!( NominationPools: pallet_nomination_pools, RankedPolls: pallet_referenda::, RankedCollective: pallet_ranked_collective, + AssetConversion: pallet_asset_conversion, FastUnstake: pallet_fast_unstake, MessageQueue: pallet_message_queue, 
Pov: frame_benchmarking_pallet_pov, @@ -1875,7 +1975,7 @@ pub type SignedExtra = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, - pallet_asset_tx_payment::ChargeAssetTxPayment, + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, ); /// Unchecked extrinsic type as expected by this runtime. @@ -1914,7 +2014,7 @@ mod mmr { pub use pallet_mmr::primitives::*; pub type Leaf = <::LeafData as LeafDataProvider>::LeafData; - pub type Hash = ::Hash; + pub type Hash = ::Output; pub type Hashing = ::Hashing; } @@ -1935,6 +2035,7 @@ mod benches { [pallet_contracts, Contracts] [pallet_core_fellowship, CoreFellowship] [pallet_democracy, Democracy] + [pallet_asset_conversion, AssetConversion] [pallet_election_provider_multi_phase, ElectionProviderMultiPhase] [pallet_election_provider_support_benchmarking, EPSBench::] [pallet_elections_phragmen, Elections] @@ -1961,6 +2062,7 @@ mod benches { [pallet_scheduler, Scheduler] [pallet_glutton, Glutton] [pallet_session, SessionBench::] + [pallet_society, Society] [pallet_staking, Staking] [pallet_state_trie_migration, StateTrieMigration] [pallet_sudo, Sudo] @@ -1972,6 +2074,7 @@ mod benches { [pallet_asset_rate, AssetRate] [pallet_uniques, Uniques] [pallet_nfts, Nfts] + [pallet_nft_fractionalization, NftFractionalization] [pallet_utility, Utility] [pallet_vesting, Vesting] [pallet_whitelist, Whitelist] @@ -2161,8 +2264,8 @@ impl_runtime_apis! { } } - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Index { + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { System::account_nonce(account) } } @@ -2271,6 +2374,26 @@ impl_runtime_apis! 
{ } } + impl pallet_asset_conversion::AssetConversionApi< + Block, + Balance, + u128, + NativeOrAssetId + > for Runtime + { + fn quote_price_exact_tokens_for_tokens(asset1: NativeOrAssetId, asset2: NativeOrAssetId, amount: u128, include_fee: bool) -> Option { + AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) + } + + fn quote_price_tokens_for_exact_tokens(asset1: NativeOrAssetId, asset2: NativeOrAssetId, amount: u128, include_fee: bool) -> Option { + AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) + } + + fn get_reserves(asset1: NativeOrAssetId, asset2: NativeOrAssetId) -> Option<(Balance, Balance)> { + AssetConversion::get_reserves(&asset1, &asset2).ok() + } + } + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi for Runtime { diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index a43b2b9ba13e5..711c48c34329d 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -13,7 +13,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } fs_extra = "1" futures = "0.3.21" log = "0.4.17" @@ -22,9 +22,10 @@ frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } node-executor = { version = "3.0.0-dev", path = "../executor" } node-primitives = { version = "2.0.0", path = "../primitives" } kitchensink-runtime = { version = "3.0.0-dev", path = "../runtime" } +pallet-asset-conversion = { version = "4.0.0-dev", path = "../../../frame/asset-conversion" } pallet-assets = { version = "4.0.0-dev", path = "../../../frame/assets" } +pallet-asset-conversion-tx-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } pallet-asset-tx-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/asset-tx-payment" } 
-pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" } sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } sc-client-db = { version = "0.10.0-dev", features = ["rocksdb"], path = "../../../client/db" } @@ -38,10 +39,10 @@ sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-io = { version = "7.0.0", path = "../../../primitives/io" } -sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-io = { version = "23.0.0", path = "../../../primitives/io" } +sp-keyring = { version = "24.0.0", path = "../../../primitives/keyring" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } sp-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/timestamp" } substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 9dbb26a906e6f..f1ab2212239b1 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -40,17 +40,14 @@ use kitchensink_runtime::{ }; use node_primitives::Block; use sc_block_builder::BlockBuilderProvider; -use sc_client_api::{ - execution_extensions::{ExecutionExtensions, ExecutionStrategies}, - ExecutionStrategy, -}; +use 
sc_client_api::execution_extensions::ExecutionExtensions; use sc_client_db::PruningMode; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, ImportedAux}; use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod, WasmtimeInstantiationStrategy}; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; use sp_consensus::BlockOrigin; -use sp_core::{blake2_256, ed25519, sr25519, traits::SpawnNamed, ExecutionContext, Pair, Public}; +use sp_core::{blake2_256, ed25519, sr25519, traits::SpawnNamed, Pair, Public}; use sp_inherents::InherentData; use sp_runtime::{ traits::{Block as BlockT, IdentifyAccount, Verify}, @@ -354,7 +351,7 @@ impl BenchDb { dir.path().to_string_lossy(), ); let (_client, _backend, _task_executor) = - Self::bench_client(database_type, dir.path(), Profile::Native, &keyring); + Self::bench_client(database_type, dir.path(), &keyring); let directory_guard = Guard(dir); BenchDb { keyring, directory_guard, database_type } @@ -380,7 +377,6 @@ impl BenchDb { fn bench_client( database_type: DatabaseType, dir: &std::path::Path, - profile: Profile, keyring: &BenchKeyring, ) -> (Client, std::sync::Arc, TaskExecutor) { let db_config = sc_client_db::DatabaseSettings { @@ -415,12 +411,7 @@ impl BenchDb { genesis_block_builder, None, None, - ExecutionExtensions::new( - profile.into_execution_strategies(), - None, - None, - Arc::new(executor), - ), + ExecutionExtensions::new(None, Arc::new(executor)), Box::new(task_executor.clone()), None, None, @@ -444,11 +435,7 @@ impl BenchDb { client .runtime_api() - .inherent_extrinsics_with_context( - client.chain_info().genesis_hash, - ExecutionContext::BlockConstruction, - inherent_data, - ) + .inherent_extrinsics(client.chain_info().genesis_hash, inherent_data) .expect("Get inherents failed") } @@ -459,12 +446,8 @@ impl BenchDb { /// Get cliet for this database operations. 
pub fn client(&mut self) -> Client { - let (client, _backend, _task_executor) = Self::bench_client( - self.database_type, - self.directory_guard.path(), - Profile::Wasm, - &self.keyring, - ); + let (client, _backend, _task_executor) = + Self::bench_client(self.database_type, self.directory_guard.path(), &self.keyring); client } @@ -507,10 +490,10 @@ impl BenchDb { } /// Clone this database and create context for testing/benchmarking. - pub fn create_context(&self, profile: Profile) -> BenchContext { + pub fn create_context(&self) -> BenchContext { let BenchDb { directory_guard, keyring, database_type } = self.clone(); let (client, backend, task_executor) = - Self::bench_client(database_type, directory_guard.path(), profile, &keyring); + Self::bench_client(database_type, directory_guard.path(), &keyring); BenchContext { client: Arc::new(client), @@ -603,7 +586,7 @@ impl BenchKeyring { } /// Generate genesis with accounts from this keyring endowed with some balance. - pub fn generate_genesis(&self) -> kitchensink_runtime::GenesisConfig { + pub fn generate_genesis(&self) -> kitchensink_runtime::RuntimeGenesisConfig { crate::genesis::config_endowed( Some(kitchensink_runtime::wasm_binary_unwrap()), self.collect_account_ids(), @@ -611,36 +594,6 @@ impl BenchKeyring { } } -/// Profile for exetion strategies. -#[derive(Clone, Copy, Debug)] -pub enum Profile { - /// As native as possible. - Native, - /// As wasm as possible. 
- Wasm, -} - -impl Profile { - fn into_execution_strategies(self) -> ExecutionStrategies { - match self { - Profile::Wasm => ExecutionStrategies { - syncing: ExecutionStrategy::AlwaysWasm, - importing: ExecutionStrategy::AlwaysWasm, - block_construction: ExecutionStrategy::AlwaysWasm, - offchain_worker: ExecutionStrategy::AlwaysWasm, - other: ExecutionStrategy::AlwaysWasm, - }, - Profile::Native => ExecutionStrategies { - syncing: ExecutionStrategy::NativeElseWasm, - importing: ExecutionStrategy::NativeElseWasm, - block_construction: ExecutionStrategy::NativeElseWasm, - offchain_worker: ExecutionStrategy::NativeElseWasm, - other: ExecutionStrategy::NativeElseWasm, - }, - } - } -} - struct Guard(tempfile::TempDir); impl Guard { diff --git a/bin/node/testing/src/client.rs b/bin/node/testing/src/client.rs index 8594a4a2e2d32..c55867360bd62 100644 --- a/bin/node/testing/src/client.rs +++ b/bin/node/testing/src/client.rs @@ -18,7 +18,6 @@ //! Utilities to build a `TestClient` for `kitchensink-runtime`. -use sc_service::client; use sp_runtime::BuildStorage; /// Re-export test-client utilities. pub use substrate_test_client::*; @@ -37,9 +36,6 @@ pub type Client = client::Client< kitchensink_runtime::RuntimeApi, >; -/// Transaction for kitchensink-runtime. -pub type Transaction = sc_client_api::backend::TransactionFor; - /// Genesis configuration parameters for `TestClient`. 
#[derive(Default)] pub struct GenesisParameters; diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index d542bb29c2539..8063b8ef45708 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -21,20 +21,21 @@ use crate::keyring::*; use kitchensink_runtime::{ constants::currency::*, wasm_binary_unwrap, AccountId, AssetsConfig, BabeConfig, - BalancesConfig, GenesisConfig, GrandpaConfig, IndicesConfig, SessionConfig, SocietyConfig, - StakerStatus, StakingConfig, SystemConfig, BABE_GENESIS_EPOCH_CONFIG, + BalancesConfig, GluttonConfig, GrandpaConfig, IndicesConfig, RuntimeGenesisConfig, + SessionConfig, SocietyConfig, StakerStatus, StakingConfig, SystemConfig, + BABE_GENESIS_EPOCH_CONFIG, }; use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; use sp_runtime::Perbill; /// Create genesis runtime configuration for tests. -pub fn config(code: Option<&[u8]>) -> GenesisConfig { +pub fn config(code: Option<&[u8]>) -> RuntimeGenesisConfig { config_endowed(code, Default::default()) } /// Create genesis runtime configuration for tests with some extra /// endowed accounts. 
-pub fn config_endowed(code: Option<&[u8]>, extra_endowed: Vec) -> GenesisConfig { +pub fn config_endowed(code: Option<&[u8]>, extra_endowed: Vec) -> RuntimeGenesisConfig { let mut endowed = vec![ (alice(), 111 * DOLLARS), (bob(), 100 * DOLLARS), @@ -46,9 +47,10 @@ pub fn config_endowed(code: Option<&[u8]>, extra_endowed: Vec) -> Gen endowed.extend(extra_endowed.into_iter().map(|endowed| (endowed, 100 * DOLLARS))); - GenesisConfig { + RuntimeGenesisConfig { system: SystemConfig { code: code.map(|x| x.to_vec()).unwrap_or_else(|| wasm_binary_unwrap().to_vec()), + ..Default::default() }, indices: IndicesConfig { indices: vec![] }, balances: BalancesConfig { balances: endowed }, @@ -75,8 +77,12 @@ pub fn config_endowed(code: Option<&[u8]>, extra_endowed: Vec) -> Gen invulnerables: vec![alice(), bob(), charlie()], ..Default::default() }, - babe: BabeConfig { authorities: vec![], epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG) }, - grandpa: GrandpaConfig { authorities: vec![] }, + babe: BabeConfig { + authorities: vec![], + epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), + ..Default::default() + }, + grandpa: GrandpaConfig { authorities: vec![], _config: Default::default() }, im_online: Default::default(), authority_discovery: Default::default(), democracy: Default::default(), @@ -86,13 +92,20 @@ pub fn config_endowed(code: Option<&[u8]>, extra_endowed: Vec) -> Gen elections: Default::default(), sudo: Default::default(), treasury: Default::default(), - society: SocietyConfig { members: vec![alice(), bob()], pot: 0, max_members: 999 }, + society: SocietyConfig { pot: 0 }, vesting: Default::default(), assets: AssetsConfig { assets: vec![(9, alice(), true, 1)], ..Default::default() }, + pool_assets: Default::default(), transaction_storage: Default::default(), transaction_payment: Default::default(), alliance: Default::default(), alliance_motion: Default::default(), nomination_pools: Default::default(), + glutton: GluttonConfig { + compute: Default::default(), + storage: 
Default::default(), + trash_data_count: Default::default(), + ..Default::default() + }, } } diff --git a/bin/node/testing/src/keyring.rs b/bin/node/testing/src/keyring.rs index e16502bf17554..b4b714d9083d6 100644 --- a/bin/node/testing/src/keyring.rs +++ b/bin/node/testing/src/keyring.rs @@ -20,7 +20,7 @@ use codec::Encode; use kitchensink_runtime::{CheckedExtrinsic, SessionKeys, SignedExtra, UncheckedExtrinsic}; -use node_primitives::{AccountId, Balance, Index}; +use node_primitives::{AccountId, Balance, Nonce}; use sp_keyring::{AccountKeyring, Ed25519Keyring, Sr25519Keyring}; use sp_runtime::generic::Era; @@ -68,7 +68,7 @@ pub fn to_session_keys( } /// Returns transaction extra. -pub fn signed_extra(nonce: Index, extra_fee: Balance) -> SignedExtra { +pub fn signed_extra(nonce: Nonce, extra_fee: Balance) -> SignedExtra { ( frame_system::CheckNonZeroSender::new(), frame_system::CheckSpecVersion::new(), @@ -77,7 +77,7 @@ pub fn signed_extra(nonce: Index, extra_fee: Balance) -> SignedExtra { frame_system::CheckEra::from(Era::mortal(256, 0)), frame_system::CheckNonce::from(nonce), frame_system::CheckWeight::new(), - pallet_asset_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), ) } diff --git a/bin/utils/chain-spec-builder/Cargo.toml b/bin/utils/chain-spec-builder/Cargo.toml index cb80963cc6f93..c592f8ac226c3 100644 --- a/bin/utils/chain-spec-builder/Cargo.toml +++ b/bin/utils/chain-spec-builder/Cargo.toml @@ -13,6 +13,13 @@ publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +[[bin]] +path = "bin/main.rs" +name = "chain-spec-builder" + +[lib] +crate-type = ["rlib"] + [dependencies] ansi_term = "0.12.1" clap = { version = "4.2.5", features = ["derive"] } @@ -20,5 +27,5 @@ rand = "0.8" node-cli = { version = "3.0.0-dev", path = "../../node/cli" } sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" } sc-keystore = { version = 
"4.0.0-dev", path = "../../../client/keystore" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } -sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-keystore = { version = "0.27.0", path = "../../../primitives/keystore" } diff --git a/bin/utils/chain-spec-builder/README.md b/bin/utils/chain-spec-builder/README.md deleted file mode 100644 index 3e9ac0bddbdc1..0000000000000 --- a/bin/utils/chain-spec-builder/README.md +++ /dev/null @@ -1 +0,0 @@ -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/bin/utils/chain-spec-builder/bin/main.rs b/bin/utils/chain-spec-builder/bin/main.rs new file mode 100644 index 0000000000000..53e11abbf6282 --- /dev/null +++ b/bin/utils/chain-spec-builder/bin/main.rs @@ -0,0 +1,89 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use chain_spec_builder::{ + generate_authority_keys_and_store, generate_chain_spec, print_seeds, ChainSpecBuilder, +}; +use clap::Parser; +use node_cli::chain_spec; +use rand::{distributions::Alphanumeric, rngs::OsRng, Rng}; +use sp_core::{crypto::Ss58Codec, sr25519}; +use std::fs; + +fn main() -> Result<(), String> { + #[cfg(build_type = "debug")] + println!( + "The chain spec builder builds a chain specification that includes a Substrate runtime \ + compiled as WASM. To ensure proper functioning of the included runtime compile (or run) \ + the chain spec builder binary in `--release` mode.\n", + ); + + let builder = ChainSpecBuilder::parse(); + let chain_spec_path = builder.chain_spec_path().to_path_buf(); + + let (authority_seeds, nominator_accounts, endowed_accounts, sudo_account) = match builder { + ChainSpecBuilder::Generate { authorities, nominators, endowed, keystore_path, .. } => { + let authorities = authorities.max(1); + let rand_str = || -> String { + OsRng.sample_iter(&Alphanumeric).take(32).map(char::from).collect() + }; + + let authority_seeds = (0..authorities).map(|_| rand_str()).collect::>(); + let nominator_seeds = (0..nominators).map(|_| rand_str()).collect::>(); + let endowed_seeds = (0..endowed).map(|_| rand_str()).collect::>(); + let sudo_seed = rand_str(); + + print_seeds(&authority_seeds, &nominator_seeds, &endowed_seeds, &sudo_seed); + + if let Some(keystore_path) = keystore_path { + generate_authority_keys_and_store(&authority_seeds, &keystore_path)?; + } + + let nominator_accounts = nominator_seeds + .into_iter() + .map(|seed| { + chain_spec::get_account_id_from_seed::(&seed).to_ss58check() + }) + .collect(); + + let endowed_accounts = endowed_seeds + .into_iter() + .map(|seed| { + chain_spec::get_account_id_from_seed::(&seed).to_ss58check() + }) + .collect(); + + let sudo_account = + chain_spec::get_account_id_from_seed::(&sudo_seed).to_ss58check(); + + (authority_seeds, nominator_accounts, endowed_accounts, sudo_account) + }, + 
ChainSpecBuilder::New { + authority_seeds, + nominator_accounts, + endowed_accounts, + sudo_account, + .. + } => (authority_seeds, nominator_accounts, endowed_accounts, sudo_account), + }; + + let json = + generate_chain_spec(authority_seeds, nominator_accounts, endowed_accounts, sudo_account)?; + + fs::write(chain_spec_path, json).map_err(|err| err.to_string()) +} diff --git a/bin/utils/chain-spec-builder/src/main.rs b/bin/utils/chain-spec-builder/src/lib.rs similarity index 70% rename from bin/utils/chain-spec-builder/src/main.rs rename to bin/utils/chain-spec-builder/src/lib.rs index f94fca1c1638d..528b6b70115a0 100644 --- a/bin/utils/chain-spec-builder/src/main.rs +++ b/bin/utils/chain-spec-builder/src/lib.rs @@ -16,28 +16,34 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{ - fs, - path::{Path, PathBuf}, -}; +//! Substrate's chain spec builder utility. +//! +//! A chain-spec is short for `chain-configuration`. See the [`sc-chain-spec`] for more information. +//! +//! Note that this binary is analogous to the `build-spec` subcommand, contained in typical +//! substrate-based nodes. This particular binary is capable of building a more sophisticated chain +//! specification that can be used with the substrate-node, ie. [`node-cli`]. +//! +//! See [`ChainSpecBuilder`] for a list of available commands. +//! +//! [`sc-chain-spec`]: ../sc_chain_spec/index.html +//! 
[`node-cli`]: ../node_cli/index.html + +use std::path::{Path, PathBuf}; use ansi_term::Style; use clap::Parser; -use rand::{distributions::Alphanumeric, rngs::OsRng, Rng}; use node_cli::chain_spec::{self, AccountId}; use sc_keystore::LocalKeystore; -use sp_core::{ - crypto::{ByteArray, Ss58Codec}, - sr25519, -}; +use sp_core::crypto::{ByteArray, Ss58Codec}; use sp_keystore::KeystorePtr; /// A utility to easily create a testnet chain spec definition with a given set /// of authorities and endowed accounts and/or generate random accounts. #[derive(Parser)] #[command(rename_all = "kebab-case")] -enum ChainSpecBuilder { +pub enum ChainSpecBuilder { /// Create a new chain spec with the given authorities, endowed and sudo /// accounts. New { @@ -87,7 +93,7 @@ enum ChainSpecBuilder { impl ChainSpecBuilder { /// Returns the path where the chain spec should be saved. - fn chain_spec_path(&self) -> &Path { + pub fn chain_spec_path(&self) -> &Path { match self { ChainSpecBuilder::New { chain_spec_path, .. } => chain_spec_path.as_path(), ChainSpecBuilder::Generate { chain_spec_path, .. } => chain_spec_path.as_path(), @@ -100,7 +106,7 @@ fn genesis_constructor( nominator_accounts: &[AccountId], endowed_accounts: &[AccountId], sudo_account: &AccountId, -) -> chain_spec::GenesisConfig { +) -> chain_spec::RuntimeGenesisConfig { let authorities = authority_seeds .iter() .map(AsRef::as_ref) @@ -115,7 +121,8 @@ fn genesis_constructor( ) } -fn generate_chain_spec( +/// Generate the chain spec using the given seeds and accounts. +pub fn generate_chain_spec( authority_seeds: Vec, nominator_accounts: Vec, endowed_accounts: Vec, @@ -161,7 +168,11 @@ fn generate_chain_spec( chain_spec.as_json(false) } -fn generate_authority_keys_and_store(seeds: &[String], keystore_path: &Path) -> Result<(), String> { +/// Generate the authority keys and store them in the given `keystore_path`. 
+pub fn generate_authority_keys_and_store( + seeds: &[String], + keystore_path: &Path, +) -> Result<(), String> { for (n, seed) in seeds.iter().enumerate() { let keystore: KeystorePtr = LocalKeystore::open(keystore_path.join(format!("auth-{}", n)), None) @@ -192,7 +203,8 @@ fn generate_authority_keys_and_store(seeds: &[String], keystore_path: &Path) -> Ok(()) } -fn print_seeds( +/// Print the given seeds +pub fn print_seeds( authority_seeds: &[String], nominator_seeds: &[String], endowed_seeds: &[String], @@ -227,66 +239,3 @@ fn print_seeds( println!("{}", header.paint("Sudo seed")); println!("//{}", sudo_seed); } - -fn main() -> Result<(), String> { - #[cfg(build_type = "debug")] - println!( - "The chain spec builder builds a chain specification that includes a Substrate runtime \ - compiled as WASM. To ensure proper functioning of the included runtime compile (or run) \ - the chain spec builder binary in `--release` mode.\n", - ); - - let builder = ChainSpecBuilder::parse(); - let chain_spec_path = builder.chain_spec_path().to_path_buf(); - - let (authority_seeds, nominator_accounts, endowed_accounts, sudo_account) = match builder { - ChainSpecBuilder::Generate { authorities, nominators, endowed, keystore_path, .. 
} => { - let authorities = authorities.max(1); - let rand_str = || -> String { - OsRng.sample_iter(&Alphanumeric).take(32).map(char::from).collect() - }; - - let authority_seeds = (0..authorities).map(|_| rand_str()).collect::>(); - let nominator_seeds = (0..nominators).map(|_| rand_str()).collect::>(); - let endowed_seeds = (0..endowed).map(|_| rand_str()).collect::>(); - let sudo_seed = rand_str(); - - print_seeds(&authority_seeds, &nominator_seeds, &endowed_seeds, &sudo_seed); - - if let Some(keystore_path) = keystore_path { - generate_authority_keys_and_store(&authority_seeds, &keystore_path)?; - } - - let nominator_accounts = nominator_seeds - .into_iter() - .map(|seed| { - chain_spec::get_account_id_from_seed::(&seed).to_ss58check() - }) - .collect(); - - let endowed_accounts = endowed_seeds - .into_iter() - .map(|seed| { - chain_spec::get_account_id_from_seed::(&seed).to_ss58check() - }) - .collect(); - - let sudo_account = - chain_spec::get_account_id_from_seed::(&sudo_seed).to_ss58check(); - - (authority_seeds, nominator_accounts, endowed_accounts, sudo_account) - }, - ChainSpecBuilder::New { - authority_seeds, - nominator_accounts, - endowed_accounts, - sudo_account, - .. - } => (authority_seeds, nominator_accounts, endowed_accounts, sudo_account), - }; - - let json = - generate_chain_spec(authority_seeds, nominator_accounts, endowed_accounts, sudo_account)?; - - fs::write(chain_spec_path, json).map_err(|err| err.to_string()) -} diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 9e3abb804f3b9..5ef0da81a09d5 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -2,12 +2,12 @@ name = "subkey" version = "3.0.0" authors = ["Parity Technologies "] +description = "Generate and restore keys for Substrate based chains such as Polkadot, Kusama and a growing number of parachains and Substrate based projects." 
edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" -publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/utils/subkey/README.md b/bin/utils/subkey/README.md index ddef20f61e134..d19ccefb59aae 100644 --- a/bin/utils/subkey/README.md +++ b/bin/utils/subkey/README.md @@ -25,10 +25,9 @@ You will need to have the Substrate build dependencies to install Subkey. Use th Command: ```bash -# Use the `--fast` flag to get the dependencies without needing to install the Substrate and Subkey binary -curl https://getsubstrate.io -sSf | bash -s -- --fast # Install only `subkey`, at a specific version of the subkey crate cargo install --force subkey --git https://github.com/paritytech/substrate --version --locked +# If you run into issues building, you likely are missing deps defined in https://docs.substrate.io/install/ ``` ### Run in a container @@ -69,7 +68,7 @@ The output above also show the **public key** and the **Account ID**. Those are The **SS58 address** (or **Public Address**) of a new account is a reprensentation of the public keys of an account for a given network (for instance Kusama or Polkadot). -You can read more about the SS58 format in the [Substrate Docs](https://docs.substrate.io/v3/advanced/ss58/) and see the list of reserved prefixes in the [SS58 Registry](https://github.com/paritytech/ss58-registry). +You can read more about the [SS58 format in the Substrate Docs](https://docs.substrate.io/reference/address-formats/) and see the list of reserved prefixes in the [SS58 Registry](https://github.com/paritytech/ss58-registry). 
For instance, considering the previous seed `0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d` the SS58 addresses are: diff --git a/bin/utils/subkey/src/lib.rs b/bin/utils/subkey/src/lib.rs index 201d4d25f84ab..f3023acde4047 100644 --- a/bin/utils/subkey/src/lib.rs +++ b/bin/utils/subkey/src/lib.rs @@ -16,6 +16,298 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +//! # Subkey +//! +//! Subkey is a commandline utility included with Substrate. It allows generating and restoring keys +//! for Substrate based chains such as Polkadot, Kusama and a growing number of parachains and +//! Substrate based projects. + +//! `subkey` provides a few sub-commands to generate keys, check keys, sign messages, verify +//! messages, etc... +//! +//! You can see the full list of commands with `subkey --help`. Most commands have additional help +//! available with for instance `subkey generate --help` for the `generate` command. +//! +//! ## Safety first +//! +//! `subkey` does not need an internet connection to work. Indeed, for the best security, you should +//! be using `subkey` on a machine that is **not connected** to the internet. +//! +//! `subkey` deals with **seeds** and **private keys**. Make sure to use `subkey` in a safe +//! environment (ie. no one looking over your shoulder) and on a safe computer (ie. no one able to +//! check your command history). +//! +//! If you save any output of `subkey` into a file, make sure to apply proper permissions and/or +//! delete the file as soon as possible. +//! +//! ## Usage +//! +//! The following guide explains *some* of the `subkey` commands. For the full list and the most up +//! to date documentation, make sure to check the integrated help with `subkey --help`. +//! +//! ### Install with Cargo +//! +//! You will need to have the Substrate build dependencies to install Subkey. Use the following two +//! 
commands to install the dependencies and Subkey, respectively: +//! +//! Command: +//! +//! ```bash +//! # Install only `subkey`, at a specific version of the subkey crate +//! cargo install --force subkey --git https://github.com/paritytech/substrate --version --locked +//! # If you run into issues building, you likely are missing deps defined in https://docs.substrate.io/install/ +//! ``` +//! +//! ### Run in a container +//! +//! ```bash +//! # Use `--pull=always` with the `latest` tag, or specify a version in a tag +//! docker run -it --pull=always docker.io/parity/subkey:latest +//! ``` +//! +//! ### Generate a random account +//! +//! Generating a new key is as simple as running: +//! +//! ```bash +//! subkey generate +//! ``` +//! +//! The output looks similar to: +//! +//! ```text +//! Secret phrase `hotel forest jar hover kite book view eight stuff angle legend defense` is account: +//! Secret seed: 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d +//! Public key (hex): 0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 +//! Account ID: 0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 +//! SS58 Address: 5Hpm9fq3W3dQgwWpAwDS2ZHKAdnk86QRCu7iX4GnmDxycrte +//! ``` +//! +//! --- +//! ☠️ DO NOT RE-USE ANY OF THE SEEDS AND SECRETS FROM THIS PAGE ☠️. +//! +//! You can read more about security and risks in [SECURITY.md](./SECURITY.md) and in the [Polkadot Wiki](https://wiki.polkadot.network/docs/learn-account-generation). +//! +//! --- +//! +//! The output above shows a **secret phrase** (also called **mnemonic phrase**) and the **secret +//! seed** (also called **Private Key**). Those 2 secrets are the pieces of information you MUST +//! keep safe and secret. All the other information below can be derived from those secrets. +//! +//! The output above also shows the **public key** and the **Account ID**. Those are independent +//! from the network where you will use the key. +//! +//!
The **SS58 address** (or **Public Address**) of a new account is a representation of the public +//! keys of an account for a given network (for instance Kusama or Polkadot). +//! +//! You can read more about the [SS58 format in the Substrate Docs](https://docs.substrate.io/reference/address-formats/) and see the list of reserved prefixes in the [SS58 Registry](https://github.com/paritytech/ss58-registry). +//! +//! For instance, considering the previous seed +//! `0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d` the SS58 addresses are: +//! +//! - Polkadot: `16m4J167Mptt8UXL8aGSAi7U2FnPpPxZHPrCgMG9KJzVoFqM` +//! - Kusama: `JLNozAv8QeLSbLFwe2UvWeKKE4yvmDbfGxTuiYkF2BUMx4M` +//! +//! ### Json output +//! +//! `subkey` can also generate the output as *json*. This is useful for automation. +//! +//! command: +//! +//! ```bash +//! subkey generate --output-type json +//! ``` +//! +//! output: +//! +//! ```json +//! { +//! "accountId": "0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515", +//! "publicKey": "0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515", +//! "secretPhrase": "hotel forest jar hover kite book view eight stuff angle legend defense", +//! "secretSeed": "0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d", +//! "ss58Address": "5Hpm9fq3W3dQgwWpAwDS2ZHKAdnk86QRCu7iX4GnmDxycrte" +//! } +//! ``` +//! +//! So if you only want to get the `secretSeed` for instance, you can use: +//! +//! command: +//! +//! ```bash +//! subkey generate --output-type json | jq -r .secretSeed +//! ``` +//! +//! output: +//! +//! ```text +//! 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d +//! ``` +//! +//! ### Additional user-defined password +//! +//! `subkey` supports an additional user-defined secret that will be appended to the seed. Let's see +//! the following example: +//! +//! ```bash +//! subkey generate --password extra_secret +//! ``` +//! +//! output: +//! +//! ```text +//!
Secret phrase `soup lyrics media market way crouch elevator put moon useful question wide` is account: +//! Secret seed: 0xe7cfd179d6537a676cb94bac3b5c5c9cb1550e846ac4541040d077dfbac2e7fd +//! Public key (hex): 0xf6a233c3e1de1a2ae0486100b460b3ce3d7231ddfe9dadabbd35ab968c70905d +//! Account ID: 0xf6a233c3e1de1a2ae0486100b460b3ce3d7231ddfe9dadabbd35ab968c70905d +//! SS58 Address: 5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC +//! ``` +//! +//! Using the `inspect` command (see more details below), we see that knowing only the **secret +//! seed** is no longer sufficient to recover the account: +//! +//! ```bash +//! subkey inspect "soup lyrics media market way crouch elevator put moon useful question wide" +//! ``` +//! +//! which recovers the account `5Fe4sqj2K4fRuzEGvToi4KATqZfiDU7TqynjXG6PZE2dxwyh` and not +//! `5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC` as we expected. The additional user-defined +//! **password** (`extra_secret` in our example) is now required to fully recover the account. Let's +//! inspect the previous mnemonic, this time passing also the required `password` as shown +//! below: +//! +//! ```bash +//! subkey inspect --password extra_secret "soup lyrics media market way crouch elevator put moon useful question wide" +//! ``` +//! +//! This time, we properly recovered `5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC`. +//! +//! ### Inspecting a key +//! +//! If you have *some data* about a key, `subkey inspect` will help you discover more information +//! about it. +//! +//! If you have **secrets** that you would like to verify for instance, you can use: +//! +//! ```bash +//! subkey inspect < mnemonic | seed > +//! ``` +//! +//! If you have only **public data**, you can see a subset of the information: +//! +//! ```bash +//! subkey inspect --public < pubkey | address > +//! ``` +//! +//! **NOTE**: While you will be able to recover the secret seed from the mnemonic, the opposite is +//! not possible. +//! +//!
**NOTE**: For obvious reasons, the **secrets** cannot be recovered from passing **public data** +//! such as `pubkey` or `address` as input. +//! +//! command: +//! +//! ```bash +//! subkey inspect 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d +//! ``` +//! +//! output: +//! +//! ```text +//! Secret Key URI `0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d` is account: +//! Secret seed: 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d +//! Public key (hex): 0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 +//! Account ID: 0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 +//! SS58 Address: 5Hpm9fq3W3dQgwWpAwDS2ZHKAdnk86QRCu7iX4GnmDxycrte +//! ``` +//! +//! ### Signing +//! +//! `subkey` allows using a **secret key** to sign a random message. The signature can then be +//! verified by anyone using your **public key**: +//! +//! ```bash +//! echo -n | subkey sign --suri +//! ``` +//! +//! example: +//! +//! ```text +//! MESSAGE=hello +//! SURI=0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d +//! echo -n $MESSAGE | subkey sign --suri $SURI +//! ``` +//! +//! output: +//! +//! ```text +//! 9201af3788ad4f986b800853c79da47155f2e08fde2070d866be4c27ab060466fea0623dc2b51f4392f4c61f25381a62848dd66c5d8217fae3858e469ebd668c +//! ``` +//! +//! **NOTE**: Each run of the `sign` command will yield a different output. While each signature is +//! different, they are all valid. +//! +//! ### Verifying a signature +//! +//! Given a message, a signature and an address, `subkey` can verify whether the **message** has +//! been digitally signed by the holder (or one of the holders) of the **private key** for the given +//! **address**: +//! +//! ```bash +//! echo -n | subkey verify
+//! ``` +//! +//! example: +//! +//! ```bash +//! MESSAGE=hello +//! URI=0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 +//! SIGNATURE=9201af3788ad4f986b800853c79da47155f2e08fde2070d866be4c27ab060466fea0623dc2b51f4392f4c61f25381a62848dd66c5d8217fae3858e469ebd668c +//! echo -n $MESSAGE | subkey verify $SIGNATURE $URI +//! ``` +//! +//! output: +//! +//! ```text +//! Signature verifies correctly. +//! ``` +//! +//! A failure looks like: +//! +//! ```text +//! Error: SignatureInvalid +//! ``` +//! +//! ### Using the vanity generator +//! +//! You can use the included vanity generator to find a seed that provides an address which includes +//! the desired pattern. Be warned, depending on your hardware this may take a while. +//! +//! command: +//! +//! ```bash +//! subkey vanity --network polkadot --pattern bob +//! ``` +//! +//! output: +//! +//! ```text +//! Generating key containing pattern 'bob' +//! best: 190 == top: 189 +//! Secret Key URI `0x8c9a73097f235b84021a446bc2826a00c690ea0be3e0d81a84931cb4146d6691` is account: +//! Secret seed: 0x8c9a73097f235b84021a446bc2826a00c690ea0be3e0d81a84931cb4146d6691 +//! Public key (hex): 0x1a8b32e95c1f571118ea0b84801264c3c70f823e320d099e5de31b9b1f18f843 +//! Account ID: 0x1a8b32e95c1f571118ea0b84801264c3c70f823e320d099e5de31b9b1f18f843 +//! SS58 Address: 1bobYxBPjZWRPbVo35aSwci1u5Zmq8P6J2jpa4kkudBZMqE +//! ``` +//! +//! `Bob` now got a nice address starting with their name: +//! 1**bob**YxBPjZWRPbVo35aSwci1u5Zmq8P6J2jpa4kkudBZMqE. +//! +//! **Note**: While `Bob`, having a short name (3 chars), got a result rather quickly, it will take +//! much longer for `Alice` who has a much longer name, thus the chances to generate a random +//! address that contains the chain `alice` will be much smaller. 
+ use clap::Parser; use sc_cli::{ Error, GenerateCmd, GenerateNodeKeyCmd, InspectKeyCmd, InspectNodeKeyCmd, SignCmd, VanityCmd, diff --git a/client/allocator/Cargo.toml b/client/allocator/Cargo.toml index 729decb5ebb3f..d7e1f5198255c 100644 --- a/client/allocator/Cargo.toml +++ b/client/allocator/Cargo.toml @@ -16,5 +16,5 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.17" thiserror = "1.0.30" -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-wasm-interface = { version = "7.0.0", path = "../../primitives/wasm-interface" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-wasm-interface = { version = "14.0.0", path = "../../primitives/wasm-interface" } diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index 02f4292aac594..17f9747c39bc1 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } fnv = "1.0.6" @@ -28,14 +28,13 @@ sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } sp-database = { version = "4.0.0-dev", path = "../../primitives/database" } -sp-externalities = { version = "0.13.0", path = "../../primitives/externalities" } -sp-keystore = { version = "0.13.0", default-features = false, path = "../../primitives/keystore" } -sp-runtime = { version = "7.0.0", 
default-features = false, path = "../../primitives/runtime" } -sp-state-machine = { version = "0.13.0", path = "../../primitives/state-machine" } +sp-externalities = { version = "0.19.0", path = "../../primitives/externalities" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-state-machine = { version = "0.28.0", path = "../../primitives/state-machine" } sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" } -sp-storage = { version = "7.0.0", path = "../../primitives/storage" } +sp-storage = { version = "13.0.0", path = "../../primitives/storage" } [dev-dependencies] thiserror = "1.0.30" diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index b88feafb6ca3a..2d8fdef77cdb9 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -25,7 +25,7 @@ use parking_lot::RwLock; use sp_consensus::BlockOrigin; use sp_core::offchain::OffchainStorage; use sp_runtime::{ - traits::{Block as BlockT, HashFor, NumberFor}, + traits::{Block as BlockT, HashingFor, NumberFor}, Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ @@ -36,17 +36,11 @@ use sp_storage::{ChildInfo, StorageData, StorageKey}; use crate::{blockchain::Backend as BlockchainBackend, UsageInfo}; -pub use sp_state_machine::{Backend as StateBackend, KeyValueStates}; +pub use sp_state_machine::{Backend as StateBackend, BackendTransaction, KeyValueStates}; /// Extracts the state backend type for the given backend. pub type StateBackendFor = >::State; -/// Extracts the transaction for the given state backend. -pub type TransactionForSB = >>::Transaction; - -/// Extracts the transaction for the given backend. -pub type TransactionFor = TransactionForSB, Block>; - /// Describes which block import notification stream should be notified. 
#[derive(Debug, Clone, Copy)] pub enum ImportNotificationAction { @@ -161,7 +155,7 @@ impl NewBlockState { /// Keeps hold if the inserted block state and data. pub trait BlockImportOperation { /// Associated state backend type. - type State: StateBackend>; + type State: StateBackend>; /// Returns pending state. /// @@ -181,7 +175,7 @@ pub trait BlockImportOperation { /// Inject storage data into the database. fn update_db_storage( &mut self, - update: TransactionForSB, + update: BackendTransaction>, ) -> sp_blockchain::Result<()>; /// Set genesis state. If `commit` is `false` the state is saved in memory, but is not written @@ -315,16 +309,16 @@ pub trait AuxStore { /// An `Iterator` that iterates keys in a given block under a prefix. pub struct KeysIter where - State: StateBackend>, + State: StateBackend>, Block: BlockT, { - inner: >>::RawIter, + inner: >>::RawIter, state: State, } impl KeysIter where - State: StateBackend>, + State: StateBackend>, Block: BlockT, { /// Create a new iterator over storage keys. @@ -361,7 +355,7 @@ where impl Iterator for KeysIter where Block: BlockT, - State: StateBackend>, + State: StateBackend>, { type Item = StorageKey; @@ -373,17 +367,17 @@ where /// An `Iterator` that iterates keys and values in a given block under a prefix. pub struct PairsIter where - State: StateBackend>, + State: StateBackend>, Block: BlockT, { - inner: >>::RawIter, + inner: >>::RawIter, state: State, } impl Iterator for PairsIter where Block: BlockT, - State: StateBackend>, + State: StateBackend>, { type Item = (StorageKey, StorageData); @@ -397,7 +391,7 @@ where impl PairsIter where - State: StateBackend>, + State: StateBackend>, Block: BlockT, { /// Create a new iterator over storage key and value pairs. @@ -506,11 +500,11 @@ pub trait Backend: AuxStore + Send + Sync { /// Associated blockchain backend type. type Blockchain: BlockchainBackend; /// Associated state backend type. 
- type State: StateBackend> + type State: StateBackend> + Send + AsTrieBackend< - HashFor, - TrieBackendStorage = >>::TrieBackendStorage, + HashingFor, + TrieBackendStorage = >>::TrieBackendStorage, >; /// Offchain workers local storage. type OffchainStorage: OffchainStorage; diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index db8e4d8495af2..49b51ccc943ed 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -20,12 +20,13 @@ use sc_executor::{RuntimeVersion, RuntimeVersionOf}; use sp_core::traits::CallContext; +use sp_externalities::Extensions; use sp_runtime::traits::Block as BlockT; -use sp_state_machine::{ExecutionStrategy, OverlayedChanges, StorageProof}; +use sp_state_machine::{OverlayedChanges, StorageProof}; use std::cell::RefCell; use crate::execution_extensions::ExecutionExtensions; -use sp_api::{ExecutionContext, ProofRecorder, StorageTransactionCache}; +use sp_api::{HashingFor, ProofRecorder}; /// Executor Provider pub trait ExecutorProvider { @@ -58,7 +59,6 @@ pub trait CallExecutor: RuntimeVersionOf { at_hash: B::Hash, method: &str, call_data: &[u8], - strategy: ExecutionStrategy, context: CallContext, ) -> Result, sp_blockchain::Error>; @@ -72,14 +72,10 @@ pub trait CallExecutor: RuntimeVersionOf { at_hash: B::Hash, method: &str, call_data: &[u8], - changes: &RefCell, - storage_transaction_cache: Option< - &RefCell< - StorageTransactionCache>::State>, - >, - >, + changes: &RefCell>>, proof_recorder: &Option>, - context: ExecutionContext, + call_context: CallContext, + extensions: &RefCell, ) -> sp_blockchain::Result>; /// Extract RuntimeVersion of given block diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index 9ff4b6db418ad..6f927105df0bf 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -22,53 +22,11 @@ //! strategy for the runtime calls and provide the right `Externalities` //! 
extensions to support APIs for particular execution context & capabilities. -use codec::Decode; use parking_lot::RwLock; -use sc_transaction_pool_api::OffchainSubmitTransaction; -use sp_core::{ - offchain::{self, OffchainDbExt, OffchainWorkerExt, TransactionPoolExt}, - traits::{ReadRuntimeVersion, ReadRuntimeVersionExt}, - ExecutionContext, -}; +use sp_core::traits::{ReadRuntimeVersion, ReadRuntimeVersionExt}; use sp_externalities::{Extension, Extensions}; -use sp_keystore::{KeystoreExt, KeystorePtr}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, NumberFor}, -}; -pub use sp_state_machine::ExecutionStrategy; -use sp_state_machine::{DefaultHandler, ExecutionManager}; -use std::{ - marker::PhantomData, - sync::{Arc, Weak}, -}; - -/// Execution strategies settings. -#[derive(Debug, Clone)] -pub struct ExecutionStrategies { - /// Execution strategy used when syncing. - pub syncing: ExecutionStrategy, - /// Execution strategy used when importing blocks. - pub importing: ExecutionStrategy, - /// Execution strategy used when constructing blocks. - pub block_construction: ExecutionStrategy, - /// Execution strategy used for offchain workers. - pub offchain_worker: ExecutionStrategy, - /// Execution strategy used in other cases. - pub other: ExecutionStrategy, -} - -impl Default for ExecutionStrategies { - fn default() -> ExecutionStrategies { - ExecutionStrategies { - syncing: ExecutionStrategy::NativeElseWasm, - importing: ExecutionStrategy::NativeElseWasm, - block_construction: ExecutionStrategy::AlwaysWasm, - offchain_worker: ExecutionStrategy::NativeWhenPossible, - other: ExecutionStrategy::NativeElseWasm, - } - } -} +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::{marker::PhantomData, sync::Arc}; /// Generate the starting set of [`Extensions`]. /// @@ -78,22 +36,12 @@ pub trait ExtensionsFactory: Send + Sync { /// /// - `block_hash`: The hash of the block in the context that extensions will be used. 
/// - `block_number`: The number of the block in the context that extensions will be used. - /// - `capabilities`: The capabilities - fn extensions_for( - &self, - block_hash: Block::Hash, - block_number: NumberFor, - capabilities: offchain::Capabilities, - ) -> Extensions; + fn extensions_for(&self, block_hash: Block::Hash, block_number: NumberFor) + -> Extensions; } impl ExtensionsFactory for () { - fn extensions_for( - &self, - _: Block::Hash, - _: NumberFor, - _capabilities: offchain::Capabilities, - ) -> Extensions { + fn extensions_for(&self, _: Block::Hash, _: NumberFor) -> Extensions { Extensions::new() } } @@ -103,10 +51,9 @@ impl> ExtensionsFactory for Ve &self, block_hash: Block::Hash, block_number: NumberFor, - capabilities: offchain::Capabilities, ) -> Extensions { let mut exts = Extensions::new(); - exts.extend(self.iter().map(|e| e.extensions_for(block_hash, block_number, capabilities))); + exts.extend(self.iter().map(|e| e.extensions_for(block_hash, block_number))); exts } } @@ -129,12 +76,7 @@ impl ExtensionBeforeBlock { impl ExtensionsFactory for ExtensionBeforeBlock { - fn extensions_for( - &self, - _: Block::Hash, - block_number: NumberFor, - _: offchain::Capabilities, - ) -> Extensions { + fn extensions_for(&self, _: Block::Hash, block_number: NumberFor) -> Extensions { let mut exts = Extensions::new(); if block_number < self.before { @@ -145,181 +87,47 @@ impl ExtensionsFactory } } -/// Create a Offchain DB accessor object. -pub trait DbExternalitiesFactory: Send + Sync { - /// Create [`offchain::DbExternalities`] instance. - fn create(&self) -> Box; -} - -impl DbExternalitiesFactory for T { - fn create(&self) -> Box { - Box::new(self.clone()) - } -} - /// A producer of execution extensions for offchain calls. /// /// This crate aggregates extensions available for the offchain calls /// and is responsible for producing a correct `Extensions` object. /// for each call, based on required `Capabilities`. 
pub struct ExecutionExtensions { - strategies: ExecutionStrategies, - keystore: Option, - offchain_db: Option>, - // FIXME: these three are only RwLock because of https://github.com/paritytech/substrate/issues/4587 - // remove when fixed. - // To break retain cycle between `Client` and `TransactionPool` we require this - // extension to be a `Weak` reference. - // That's also the reason why it's being registered lazily instead of - // during initialization. - transaction_pool: RwLock>>>, extensions_factory: RwLock>>, - statement_store: RwLock>>, read_runtime_version: Arc, } impl ExecutionExtensions { - /// Create new `ExecutionExtensions` given a `keystore` and `ExecutionStrategies`. + /// Create new `ExecutionExtensions` given an `extensions_factory`. pub fn new( - strategies: ExecutionStrategies, - keystore: Option, - offchain_db: Option>, + extensions_factory: Option>>, read_runtime_version: Arc, ) -> Self { - let transaction_pool = RwLock::new(None); - let statement_store = RwLock::new(None); - let extensions_factory = Box::new(()); Self { - strategies, - keystore, - offchain_db, - extensions_factory: RwLock::new(extensions_factory), - transaction_pool, - statement_store, + extensions_factory: extensions_factory + .map(RwLock::new) + .unwrap_or_else(|| RwLock::new(Box::new(()))), read_runtime_version, } } - /// Get a reference to the execution strategies. - pub fn strategies(&self) -> &ExecutionStrategies { - &self.strategies - } - /// Set the new extensions_factory pub fn set_extensions_factory(&self, maker: impl ExtensionsFactory + 'static) { *self.extensions_factory.write() = Box::new(maker); } - /// Register transaction pool extension. - pub fn register_transaction_pool(&self, pool: &Arc) - where - T: OffchainSubmitTransaction + 'static, - { - *self.transaction_pool.write() = Some(Arc::downgrade(pool) as _); - } - - /// Register statement store extension. 
- pub fn register_statement_store(&self, store: Arc) { - *self.statement_store.write() = Some(Arc::downgrade(&store) as _); - } - /// Based on the execution context and capabilities it produces /// the extensions object to support desired set of APIs. pub fn extensions( &self, block_hash: Block::Hash, block_number: NumberFor, - context: ExecutionContext, ) -> Extensions { - let capabilities = context.capabilities(); - let mut extensions = - self.extensions_factory - .read() - .extensions_for(block_hash, block_number, capabilities); - - if capabilities.contains(offchain::Capabilities::KEYSTORE) { - if let Some(ref keystore) = self.keystore { - extensions.register(KeystoreExt(keystore.clone())); - } - } - - if capabilities.contains(offchain::Capabilities::TRANSACTION_POOL) { - if let Some(pool) = self.transaction_pool.read().as_ref().and_then(|x| x.upgrade()) { - extensions.register(TransactionPoolExt(Box::new(TransactionPoolAdapter { - at: BlockId::Hash(block_hash), - pool, - }) as _)); - } - } - - if capabilities.contains(offchain::Capabilities::STATEMENT_STORE) { - if let Some(store) = self.statement_store.read().as_ref().and_then(|x| x.upgrade()) { - extensions.register(sp_statement_store::runtime_api::StatementStoreExt(store)); - } - } - if capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_READ) || - capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_WRITE) - { - if let Some(offchain_db) = self.offchain_db.as_ref() { - extensions.register(OffchainDbExt::new(offchain::LimitedExternalities::new( - capabilities, - offchain_db.create(), - ))); - } - } - - if let ExecutionContext::OffchainCall(Some(ext)) = context { - extensions.register(OffchainWorkerExt::new(offchain::LimitedExternalities::new( - capabilities, - ext.0, - ))); - } + self.extensions_factory.read().extensions_for(block_hash, block_number); extensions.register(ReadRuntimeVersionExt::new(self.read_runtime_version.clone())); extensions } - - /// Create `ExecutionManager` and `Extensions` 
for given offchain call. - /// - /// Based on the execution context and capabilities it produces - /// the right manager and extensions object to support desired set of APIs. - pub fn manager_and_extensions( - &self, - block_hash: Block::Hash, - block_number: NumberFor, - context: ExecutionContext, - ) -> (ExecutionManager>, Extensions) { - let manager = match context { - ExecutionContext::BlockConstruction => self.strategies.block_construction.get_manager(), - ExecutionContext::Syncing => self.strategies.syncing.get_manager(), - ExecutionContext::Importing => self.strategies.importing.get_manager(), - ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.is_all() => - self.strategies.offchain_worker.get_manager(), - ExecutionContext::OffchainCall(_) => self.strategies.other.get_manager(), - }; - - (manager, self.extensions(block_hash, block_number, context)) - } -} - -/// A wrapper type to pass `BlockId` to the actual transaction pool. -struct TransactionPoolAdapter { - at: BlockId, - pool: Arc>, -} - -impl offchain::TransactionPool for TransactionPoolAdapter { - fn submit_transaction(&mut self, data: Vec) -> Result<(), ()> { - let xt = match Block::Extrinsic::decode(&mut &*data) { - Ok(xt) => xt, - Err(e) => { - log::warn!("Unable to decode extrinsic: {:?}: {}", data, e); - return Err(()) - }, - }; - - self.pool.submit_at(&self.at, xt) - } } diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 7c87b479de6a5..807bdf0e33472 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -25,12 +25,12 @@ use sp_core::{ }; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor, Zero}, + traits::{Block as BlockT, HashingFor, Header as HeaderT, NumberFor, Zero}, Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ - Backend as StateBackend, ChildStorageCollection, InMemoryBackend, IndexOperation, - StorageCollection, + Backend as StateBackend, 
BackendTransaction, ChildStorageCollection, InMemoryBackend, + IndexOperation, StorageCollection, }; use std::{ collections::{HashMap, HashSet}, @@ -479,18 +479,14 @@ impl backend::AuxStore for Blockchain { /// In-memory operation. pub struct BlockImportOperation { pending_block: Option>, - old_state: InMemoryBackend>, - new_state: - Option<> as StateBackend>>::Transaction>, + old_state: InMemoryBackend>, + new_state: Option>>, aux: Vec<(Vec, Option>)>, finalized_blocks: Vec<(Block::Hash, Option)>, set_head: Option, } -impl BlockImportOperation -where - Block::Hash: Ord, -{ +impl BlockImportOperation { fn apply_storage( &mut self, storage: Storage, @@ -519,11 +515,8 @@ where } } -impl backend::BlockImportOperation for BlockImportOperation -where - Block::Hash: Ord, -{ - type State = InMemoryBackend>; +impl backend::BlockImportOperation for BlockImportOperation { + type State = InMemoryBackend>; fn state(&self) -> sp_blockchain::Result> { Ok(Some(&self.old_state)) @@ -545,7 +538,7 @@ where fn update_db_storage( &mut self, - update: > as StateBackend>>::Transaction, + update: BackendTransaction>, ) -> sp_blockchain::Result<()> { self.new_state = Some(update); Ok(()) @@ -611,20 +604,14 @@ where /// /// > **Warning**: Doesn't support all the features necessary for a proper database. Only use this /// > struct for testing purposes. Do **NOT** use in production. -pub struct Backend -where - Block::Hash: Ord, -{ - states: RwLock>>>, +pub struct Backend { + states: RwLock>>>, blockchain: Blockchain, import_lock: RwLock<()>, pinned_blocks: RwLock>, } -impl Backend -where - Block::Hash: Ord, -{ +impl Backend { /// Create a new instance of in-mem backend. 
/// /// # Warning @@ -650,10 +637,7 @@ where } } -impl backend::AuxStore for Backend -where - Block::Hash: Ord, -{ +impl backend::AuxStore for Backend { fn insert_aux< 'a, 'b: 'a, @@ -673,13 +657,10 @@ where } } -impl backend::Backend for Backend -where - Block::Hash: Ord, -{ +impl backend::Backend for Backend { type BlockImportOperation = BlockImportOperation; type Blockchain = Blockchain; - type State = InMemoryBackend>; + type State = InMemoryBackend>; type OffchainStorage = OffchainStorage; fn begin_operation(&self) -> sp_blockchain::Result { @@ -809,7 +790,7 @@ where } } -impl backend::LocalBackend for Backend where Block::Hash: Ord {} +impl backend::LocalBackend for Backend {} /// Check that genesis storage is valid. pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 0faddc10fe016..faadf3663a59d 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -36,7 +36,7 @@ pub use proof_provider::*; pub use sp_blockchain as blockchain; pub use sp_blockchain::HeaderBackend; -pub use sp_state_machine::{CompactProof, ExecutionStrategy, StorageProof}; +pub use sp_state_machine::{CompactProof, StorageProof}; pub use sp_storage::{ChildInfo, PrefixedStorageKey, StorageData, StorageKey}; /// Usage Information Provider interface diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 8b9fb743b4ade..0324cac466912 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.11" [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } futures = "0.3.21" futures-timer = "3.0.1" ip_network = "0.4.1" @@ -30,16 +30,15 @@ thiserror = "1.0" prometheus-endpoint = { package = 
"substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-network = { version = "0.10.0-dev", path = "../network/" } -sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-authority-discovery = { version = "4.0.0-dev", path = "../../primitives/authority-discovery" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.13.0", path = "../../primitives/keystore" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-keystore = { version = "0.27.0", path = "../../primitives/keystore" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } async-trait = "0.1.56" [dev-dependencies] quickcheck = { version = "1.0.3", default-features = false } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index c4f1d2e245f92..dde2e15bd6474 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" futures-timer = "3.0.1" log = "0.4.17" @@ -26,9 +26,9 @@ sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../client/transact sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = 
"../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } [dev-dependencies] parking_lot = "0.12.1" diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 642900d2f35d8..b3a8f0d8970b6 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -57,6 +57,8 @@ pub const DEFAULT_BLOCK_SIZE_LIMIT: usize = 4 * 1024 * 1024 + 512; const DEFAULT_SOFT_DEADLINE_PERCENT: Percent = Percent::from_percent(50); +const LOG_TARGET: &'static str = "basic-authorship"; + /// [`Proposer`] factory. 
pub struct ProposerFactory { spawn_handle: Box, @@ -185,8 +187,7 @@ where + Send + Sync + 'static, - C::Api: - ApiExt> + BlockBuilderApi, + C::Api: ApiExt + BlockBuilderApi, { fn init_with_now( &mut self, @@ -227,8 +228,7 @@ where + Send + Sync + 'static, - C::Api: - ApiExt> + BlockBuilderApi, + C::Api: ApiExt + BlockBuilderApi, PR: ProofRecording, { type CreateProposer = future::Ready>; @@ -267,17 +267,11 @@ where + Send + Sync + 'static, - C::Api: - ApiExt> + BlockBuilderApi, + C::Api: ApiExt + BlockBuilderApi, PR: ProofRecording, { - type Transaction = backend::TransactionFor; - type Proposal = Pin< - Box< - dyn Future, Self::Error>> - + Send, - >, - >; + type Proposal = + Pin, Self::Error>> + Send>>; type Error = sp_blockchain::Error; type ProofRecording = PR; type Proof = PR::Proof; @@ -302,7 +296,10 @@ where .propose_with(inherent_data, inherent_digests, deadline, block_size_limit) .await; if tx.send(res).is_err() { - trace!("Could not send block production result to proposer!"); + trace!( + target: LOG_TARGET, + "Could not send block production result to proposer!" 
+ ); } }), ); @@ -327,8 +324,7 @@ where + Send + Sync + 'static, - C::Api: - ApiExt> + BlockBuilderApi, + C::Api: ApiExt + BlockBuilderApi, PR: ProofRecording, { async fn propose_with( @@ -337,12 +333,35 @@ where inherent_digests: Digest, deadline: time::Instant, block_size_limit: Option, - ) -> Result, PR::Proof>, sp_blockchain::Error> - { - let propose_with_start = time::Instant::now(); + ) -> Result, sp_blockchain::Error> { + let propose_with_timer = time::Instant::now(); let mut block_builder = self.client.new_block_at(self.parent_hash, inherent_digests, PR::ENABLED)?; + self.apply_inherents(&mut block_builder, inherent_data)?; + + // TODO call `after_inherents` and check if we should apply extrinsincs here + // + + let block_timer = time::Instant::now(); + let end_reason = + self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?; + let (block, storage_changes, proof) = block_builder.build()?.into_inner(); + let block_took = block_timer.elapsed(); + + let proof = + PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?; + + self.print_summary(&block, end_reason, block_took, propose_with_timer.elapsed()); + Ok(Proposal { block, proof, storage_changes }) + } + + /// Apply all inherents to the block. + fn apply_inherents( + &self, + block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C, B>, + inherent_data: InherentData, + ) -> Result<(), sp_blockchain::Error> { let create_inherents_start = time::Instant::now(); let inherents = block_builder.create_inherents(inherent_data)?; let create_inherents_end = time::Instant::now(); @@ -358,7 +377,10 @@ where for inherent in inherents { match block_builder.push(inherent) { Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { - warn!("⚠️ Dropping non-mandatory inherent from overweight block.") + warn!( + target: LOG_TARGET, + "⚠️ Dropping non-mandatory inherent from overweight block." 
+ ) }, Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => { error!( @@ -367,12 +389,24 @@ where return Err(ApplyExtrinsicFailed(Validity(e))) }, Err(e) => { - warn!("❗️ Inherent extrinsic returned unexpected error: {}. Dropping.", e); + warn!( + target: LOG_TARGET, + "❗️ Inherent extrinsic returned unexpected error: {}. Dropping.", e + ); }, Ok(_) => {}, } } + Ok(()) + } + /// Apply as many extrinsics as possible to the block. + async fn apply_extrinsics( + &self, + block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C, B>, + deadline: time::Instant, + block_size_limit: Option, + ) -> Result { // proceed with transactions // We calculate soft deadline used only in case we start skipping transactions. let now = (self.now)(); @@ -380,7 +414,6 @@ where let left_micros: u64 = left.as_micros().saturated_into(); let soft_deadline = now + time::Duration::from_micros(self.soft_deadline_percent.mul_floor(left_micros)); - let block_timer = time::Instant::now(); let mut skipped = 0; let mut unqueue_invalid = Vec::new(); @@ -391,7 +424,7 @@ where let mut pending_iterator = select! { res = t1 => res, _ = t2 => { - log::warn!( + warn!(target: LOG_TARGET, "Timeout fired waiting for transaction pool at block #{}. \ Proceeding with production.", self.parent_number, @@ -402,8 +435,8 @@ where let block_size_limit = block_size_limit.unwrap_or(self.default_block_size_limit); - debug!("Attempting to push transactions from the pool."); - debug!("Pool status: {:?}", self.transaction_pool.status()); + debug!(target: LOG_TARGET, "Attempting to push transactions from the pool."); + debug!(target: LOG_TARGET, "Pool status: {:?}", self.transaction_pool.status()); let mut transaction_pushed = false; let end_reason = loop { @@ -416,8 +449,9 @@ where let now = (self.now)(); if now > deadline { debug!( + target: LOG_TARGET, "Consensus deadline reached when pushing block transactions, \ - proceeding with proposing." + proceeding with proposing." 
); break EndProposingReason::HitDeadline } @@ -432,51 +466,62 @@ where if skipped < MAX_SKIPPED_TRANSACTIONS { skipped += 1; debug!( + target: LOG_TARGET, "Transaction would overflow the block size limit, \ - but will try {} more transactions before quitting.", + but will try {} more transactions before quitting.", MAX_SKIPPED_TRANSACTIONS - skipped, ); continue } else if now < soft_deadline { debug!( + target: LOG_TARGET, "Transaction would overflow the block size limit, \ - but we still have time before the soft deadline, so \ - we will try a bit more." + but we still have time before the soft deadline, so \ + we will try a bit more." ); continue } else { - debug!("Reached block size limit, proceeding with proposing."); + debug!( + target: LOG_TARGET, + "Reached block size limit, proceeding with proposing." + ); break EndProposingReason::HitBlockSizeLimit } } - trace!("[{:?}] Pushing to the block.", pending_tx_hash); - match sc_block_builder::BlockBuilder::push(&mut block_builder, pending_tx_data) { + trace!(target: LOG_TARGET, "[{:?}] Pushing to the block.", pending_tx_hash); + match sc_block_builder::BlockBuilder::push(block_builder, pending_tx_data) { Ok(()) => { transaction_pushed = true; - debug!("[{:?}] Pushed to the block.", pending_tx_hash); + debug!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash); }, Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { pending_iterator.report_invalid(&pending_tx); if skipped < MAX_SKIPPED_TRANSACTIONS { skipped += 1; - debug!( + debug!(target: LOG_TARGET, "Block seems full, but will try {} more transactions before quitting.", MAX_SKIPPED_TRANSACTIONS - skipped, ); } else if (self.now)() < soft_deadline { - debug!( + debug!(target: LOG_TARGET, "Block seems full, but we still have time before the soft deadline, \ so we will try a bit more before quitting." 
); } else { - debug!("Reached block weight limit, proceeding with proposing."); + debug!( + target: LOG_TARGET, + "Reached block weight limit, proceeding with proposing." + ); break EndProposingReason::HitBlockWeightLimit } }, Err(e) => { pending_iterator.report_invalid(&pending_tx); - debug!("[{:?}] Invalid transaction: {}", pending_tx_hash, e); + debug!( + target: LOG_TARGET, + "[{:?}] Invalid transaction: {}", pending_tx_hash, e + ); unqueue_invalid.push(pending_tx_hash); }, } @@ -484,34 +529,51 @@ where if matches!(end_reason, EndProposingReason::HitBlockSizeLimit) && !transaction_pushed { warn!( - "Hit block size limit of `{}` without including any transaction!", - block_size_limit, + target: LOG_TARGET, + "Hit block size limit of `{}` without including any transaction!", block_size_limit, ); } self.transaction_pool.remove_invalid(&unqueue_invalid); + Ok(end_reason) + } - let (block, storage_changes, proof) = block_builder.build()?.into_inner(); - + /// Prints a summary and does telemetry + metrics. 
+ fn print_summary( + &self, + block: &Block, + end_reason: EndProposingReason, + block_took: time::Duration, + propose_with_took: time::Duration, + ) { + let extrinsics = block.extrinsics(); self.metrics.report(|metrics| { - metrics.number_of_transactions.set(block.extrinsics().len() as u64); - metrics.block_constructed.observe(block_timer.elapsed().as_secs_f64()); - + metrics.number_of_transactions.set(extrinsics.len() as u64); + metrics.block_constructed.observe(block_took.as_secs_f64()); metrics.report_end_proposing_reason(end_reason); + metrics.create_block_proposal_time.observe(propose_with_took.as_secs_f64()); }); + let extrinsics_summary = if extrinsics.is_empty() { + "no extrinsics".to_string() + } else { + format!( + "extrinsics ({}): [{}]", + extrinsics.len(), + extrinsics + .iter() + .map(|xt| BlakeTwo256::hash_of(xt).to_string()) + .collect::>() + .join(", ") + ) + }; + info!( - "🎁 Prepared block for proposing at {} ({} ms) [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]", + "🎁 Prepared block for proposing at {} ({} ms) [hash: {:?}; parent_hash: {}; {extrinsics_summary}", block.header().number(), - block_timer.elapsed().as_millis(), + block_took.as_millis(), ::Hash::from(block.header().hash()), block.header().parent_hash(), - block.extrinsics().len(), - block.extrinsics() - .iter() - .map(|xt| BlakeTwo256::hash_of(xt).to_string()) - .collect::>() - .join(", ") ); telemetry!( self.telemetry; @@ -520,18 +582,6 @@ where "number" => ?block.header().number(), "hash" => ?::Hash::from(block.header().hash()), ); - - let proof = - PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?; - - let propose_with_end = time::Instant::now(); - self.metrics.report(|metrics| { - metrics.create_block_proposal_time.observe( - propose_with_end.saturating_duration_since(propose_with_start).as_secs_f64(), - ); - }); - - Ok(Proposal { block, proof, storage_changes }) } } diff --git a/client/block-builder/Cargo.toml 
b/client/block-builder/Cargo.toml index d009826b2fae4..18141b044188d 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-block-builder = { version = "4.0.0-dev", path = "../../primitives/block-builder" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } [dev-dependencies] -sp-state-machine = { version = "0.13.0", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.28.0", path = "../../primitives/state-machine" } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index f055d4688822a..1878e76274804 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -32,10 +32,10 @@ use sp_api::{ ApiExt, ApiRef, Core, ProvideRuntimeApi, StorageChanges, StorageProof, TransactionOutcome, }; use sp_blockchain::{ApplyExtrinsicFailed, Error}; -use sp_core::ExecutionContext; +use sp_core::traits::CallContext; use sp_runtime::{ legacy, - traits::{Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One}, + traits::{Block as BlockT, Hash, HashingFor, Header as HeaderT, NumberFor, One}, Digest, }; @@ -85,20 +85,18 @@ impl From for 
RecordProof { /// backend to get the state of the block. Furthermore an optional `proof` is included which /// can be used to proof that the build block contains the expected data. The `proof` will /// only be set when proof recording was activated. -pub struct BuiltBlock>> { +pub struct BuiltBlock { /// The actual block that was build. pub block: Block, /// The changes that need to be applied to the backend to get the state of the build block. - pub storage_changes: StorageChanges, + pub storage_changes: StorageChanges, /// An optional proof that was recorded while building the block. pub proof: Option, } -impl>> - BuiltBlock -{ +impl BuiltBlock { /// Convert into the inner values. - pub fn into_inner(self) -> (Block, StorageChanges, Option) { + pub fn into_inner(self) -> (Block, StorageChanges, Option) { (self.block, self.storage_changes, self.proof) } } @@ -145,8 +143,7 @@ impl<'a, Block, A, B> BlockBuilder<'a, Block, A, B> where Block: BlockT, A: ProvideRuntimeApi + 'a, - A::Api: - BlockBuilderApi + ApiExt>, + A::Api: BlockBuilderApi + ApiExt, B: backend::Backend, { /// Create a new instance of builder based on the given `parent_hash` and `parent_number`. @@ -178,11 +175,9 @@ where api.record_proof(); } - api.initialize_block_with_context( - parent_hash, - ExecutionContext::BlockConstruction, - &header, - )?; + api.set_call_context(CallContext::Onchain); + + api.initialize_block(parent_hash, &header)?; let version = api .api_version::>(parent_hash)? 
@@ -209,18 +204,10 @@ where self.api.execute_in_transaction(|api| { let res = if version < 6 { #[allow(deprecated)] - api.apply_extrinsic_before_version_6_with_context( - parent_hash, - ExecutionContext::BlockConstruction, - xt.clone(), - ) - .map(legacy::byte_sized_error::convert_to_latest) + api.apply_extrinsic_before_version_6(parent_hash, xt.clone()) + .map(legacy::byte_sized_error::convert_to_latest) } else { - api.apply_extrinsic_with_context( - parent_hash, - ExecutionContext::BlockConstruction, - xt.clone(), - ) + api.apply_extrinsic(parent_hash, xt.clone()) }; match res { @@ -241,14 +228,12 @@ where /// Returns the build `Block`, the changes to the storage and an optional `StorageProof` /// supplied by `self.api`, combined as [`BuiltBlock`]. /// The storage proof will be `Some(_)` when proof recording was enabled. - pub fn build(mut self) -> Result>, Error> { - let header = self - .api - .finalize_block_with_context(self.parent_hash, ExecutionContext::BlockConstruction)?; + pub fn build(mut self) -> Result, Error> { + let header = self.api.finalize_block(self.parent_hash)?; debug_assert_eq!( header.extrinsics_root().clone(), - HashFor::::ordered_trie_root( + HashingFor::::ordered_trie_root( self.extrinsics.iter().map(Encode::encode).collect(), sp_runtime::StateVersion::V0, ), @@ -282,11 +267,7 @@ where .execute_in_transaction(move |api| { // `create_inherents` should not change any state, to ensure this we always rollback // the transaction. 
- TransactionOutcome::Rollback(api.inherent_extrinsics_with_context( - parent_hash, - ExecutionContext::BlockConstruction, - inherent_data, - )) + TransactionOutcome::Rollback(api.inherent_extrinsics(parent_hash, inherent_data)) }) .map_err(|e| Error::Application(Box::new(e))) } diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index b1188b3bd4625..1b4ce6e44605a 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] memmap2 = "0.5.0" -serde = { version = "1.0.136", features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } serde_json = "1.0.85" sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-chain-spec-derive = { version = "4.0.0-dev", path = "./derive" } @@ -22,6 +22,6 @@ sc-executor = { version = "0.10.0-dev", path = "../executor" } sc-network = { version = "0.10.0-dev", path = "../network" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } -sp-state-machine = { version = "0.13.0", path = "../../primitives/state-machine" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } +sp-state-machine = { version = "0.28.0", path = "../../primitives/state-machine" } diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml index c16cd45a99433..537f8aee6ab60 100644 --- a/client/chain-spec/derive/Cargo.toml +++ b/client/chain-spec/derive/Cargo.toml @@ -17,5 +17,5 @@ proc-macro = true [dependencies] proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" -quote = "1.0.26" -syn = "2.0.14" +quote = "1.0.28" +syn = "2.0.16" diff --git a/client/chain-spec/src/lib.rs b/client/chain-spec/src/lib.rs 
index 6239eb7326b78..341c5f28e4a44 100644 --- a/client/chain-spec/src/lib.rs +++ b/client/chain-spec/src/lib.rs @@ -116,15 +116,19 @@ //! ```json //! // The human readable name of the chain. //! "name": "Flaming Fir", +//! //! // The id of the chain. //! "id": "flamingfir9", +//! //! // The chain type of this chain. //! // Possible values are `Live`, `Development`, `Local`. //! "chainType": "Live", +//! //! // A list of multi addresses that belong to boot nodes of the chain. //! "bootNodes": [ //! "/dns/0.flamingfir.paritytech.net/tcp/30333/p2p/12D3KooWLK2gMLhWsYJzjW3q35zAs9FDDVqfqVfVuskiGZGRSMvR", //! ], +//! //! // Optional list of "multi address, verbosity" of telemetry endpoints. //! // The verbosity goes from `0` to `9`. With `0` being the mode with the lowest verbosity. //! "telemetryEndpoints": [ @@ -133,19 +137,24 @@ //! 0 //! ] //! ], +//! //! // Optional networking protocol id that identifies the chain. //! "protocolId": "fir9", +//! //! // Optional fork id. Should most likely be left empty. //! // Can be used to signal a fork on the network level when two chains have the //! // same genesis hash. //! "forkId": "random_fork", +//! //! // Custom properties. //! "properties": { //! "tokenDecimals": 15, //! "tokenSymbol": "FIR" //! }, +//! //! // Deprecated field. Should be ignored. //! "consensusEngine": null, +//! //! // The genesis declaration of the chain. //! // //! // `runtime`, `raw`, `stateRootHash` denote the type of the genesis declaration. @@ -159,6 +168,7 @@ //! // type depends on the hash used by the chain. //! // //! "genesis": { "runtime": {} }, +//! //! /// Optional map of `block_number` to `wasm_code`. //! /// //! /// The given `wasm_code` will be used to substitute the on-chain wasm code starting with the @@ -172,6 +182,8 @@ //! "codeSubstitutes": [], //! ``` //! +//! See [`ChainSpec`] for a trait representation of the above. +//! //! The chain spec can be extended with other fields that are opaque to the default chain spec. //! 
Specific node implementations will need to be able to deserialize these extensions. diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 49e0765dd29d0..61750766fa3f8 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" +array-bytes = "6.1" chrono = "0.4.10" clap = { version = "4.2.5", features = ["derive", "string"] } fdlimit = "0.2.1" @@ -21,11 +21,11 @@ futures = "0.3.21" libp2p-identity = { version = "0.1.2", features = ["peerid", "ed25519"]} log = "0.4.17" names = { version = "0.13.0", default-features = false } -parity-scale-codec = "3.2.2" +parity-scale-codec = "3.6.1" rand = "0.8.5" regex = "1.6.0" rpassword = "7.0.0" -serde = "1.0.136" +serde = "1.0.163" serde_json = "1.0.85" thiserror = "1.0.30" tiny-bip39 = "1.0.0" @@ -34,23 +34,22 @@ sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../db" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } -sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-service = { version = "0.10.0-dev", default-features = false, path = "../service" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sc-tracing = { version = "4.0.0-dev", path = "../tracing" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-keyring = { version = "7.0.0", path = "../../primitives/keyring" } -sp-keystore = { version = "0.13.0", path = "../../primitives/keystore" } -sp-panic-handler = { version = "5.0.0", path = "../../primitives/panic-handler" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } -sp-version = { version = "5.0.0", path = 
"../../primitives/version" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-keyring = { version = "24.0.0", path = "../../primitives/keyring" } +sp-keystore = { version = "0.27.0", path = "../../primitives/keystore" } +sp-panic-handler = { version = "8.0.0", path = "../../primitives/panic-handler" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } +sp-version = { version = "22.0.0", path = "../../primitives/version" } [dev-dependencies] tempfile = "3.1.0" futures-timer = "3.0.1" -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } [features] default = ["rocksdb"] diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index 7e2498cec4b7f..40d86fd97988d 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -54,7 +54,7 @@ pub const DEFAULT_WASMTIME_INSTANTIATION_STRATEGY: WasmtimeInstantiationStrategy #[derive(Debug, Clone, Copy, ValueEnum)] #[value(rename_all = "kebab-case")] pub enum WasmExecutionMethod { - /// Uses an interpreter. + /// Uses an interpreter which now is deprecated. #[clap(name = "interpreted-i-know-what-i-do")] Interpreted, /// Uses a compiled runtime. 
@@ -76,21 +76,24 @@ pub fn execution_method_from_cli( execution_method: WasmExecutionMethod, instantiation_strategy: WasmtimeInstantiationStrategy, ) -> sc_service::config::WasmExecutionMethod { - match execution_method { - WasmExecutionMethod::Interpreted => sc_service::config::WasmExecutionMethod::Interpreted, - WasmExecutionMethod::Compiled => sc_service::config::WasmExecutionMethod::Compiled { - instantiation_strategy: match instantiation_strategy { - WasmtimeInstantiationStrategy::PoolingCopyOnWrite => - sc_service::config::WasmtimeInstantiationStrategy::PoolingCopyOnWrite, - WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite => - sc_service::config::WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite, - WasmtimeInstantiationStrategy::Pooling => - sc_service::config::WasmtimeInstantiationStrategy::Pooling, - WasmtimeInstantiationStrategy::RecreateInstance => - sc_service::config::WasmtimeInstantiationStrategy::RecreateInstance, - WasmtimeInstantiationStrategy::LegacyInstanceReuse => - sc_service::config::WasmtimeInstantiationStrategy::LegacyInstanceReuse, - }, + if let WasmExecutionMethod::Interpreted = execution_method { + log::warn!( + "`interpreted-i-know-what-i-do` is deprecated and will be removed in the future. Defaults to `compiled` execution mode." 
+ ); + } + + sc_service::config::WasmExecutionMethod::Compiled { + instantiation_strategy: match instantiation_strategy { + WasmtimeInstantiationStrategy::PoolingCopyOnWrite => + sc_service::config::WasmtimeInstantiationStrategy::PoolingCopyOnWrite, + WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite => + sc_service::config::WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite, + WasmtimeInstantiationStrategy::Pooling => + sc_service::config::WasmtimeInstantiationStrategy::Pooling, + WasmtimeInstantiationStrategy::RecreateInstance => + sc_service::config::WasmtimeInstantiationStrategy::RecreateInstance, + WasmtimeInstantiationStrategy::LegacyInstanceReuse => + sc_service::config::WasmtimeInstantiationStrategy::LegacyInstanceReuse, }, } } @@ -158,17 +161,6 @@ pub enum ExecutionStrategy { NativeElseWasm, } -impl Into for ExecutionStrategy { - fn into(self) -> sc_client_api::ExecutionStrategy { - match self { - ExecutionStrategy::Native => sc_client_api::ExecutionStrategy::NativeWhenPossible, - ExecutionStrategy::Wasm => sc_client_api::ExecutionStrategy::AlwaysWasm, - ExecutionStrategy::Both => sc_client_api::ExecutionStrategy::Both, - ExecutionStrategy::NativeElseWasm => sc_client_api::ExecutionStrategy::NativeElseWasm, - } - } -} - /// Available RPC methods. 
#[allow(missing_docs)] #[derive(Debug, Copy, Clone, PartialEq, ValueEnum)] @@ -255,24 +247,15 @@ impl Into for SyncMode { fn into(self) -> sc_network::config::SyncMode { match self { SyncMode::Full => sc_network::config::SyncMode::Full, - SyncMode::Fast => - sc_network::config::SyncMode::Fast { skip_proofs: false, storage_chain_mode: false }, - SyncMode::FastUnsafe => - sc_network::config::SyncMode::Fast { skip_proofs: true, storage_chain_mode: false }, + SyncMode::Fast => sc_network::config::SyncMode::LightState { + skip_proofs: false, + storage_chain_mode: false, + }, + SyncMode::FastUnsafe => sc_network::config::SyncMode::LightState { + skip_proofs: true, + storage_chain_mode: false, + }, SyncMode::Warp => sc_network::config::SyncMode::Warp, } } } - -/// Default value for the `--execution-syncing` parameter. -pub const DEFAULT_EXECUTION_SYNCING: ExecutionStrategy = ExecutionStrategy::Wasm; -/// Default value for the `--execution-import-block` parameter. -pub const DEFAULT_EXECUTION_IMPORT_BLOCK: ExecutionStrategy = ExecutionStrategy::Wasm; -/// Default value for the `--execution-import-block` parameter when the node is a validator. -pub const DEFAULT_EXECUTION_IMPORT_BLOCK_VALIDATOR: ExecutionStrategy = ExecutionStrategy::Wasm; -/// Default value for the `--execution-block-construction` parameter. -pub const DEFAULT_EXECUTION_BLOCK_CONSTRUCTION: ExecutionStrategy = ExecutionStrategy::Wasm; -/// Default value for the `--execution-offchain-worker` parameter. -pub const DEFAULT_EXECUTION_OFFCHAIN_WORKER: ExecutionStrategy = ExecutionStrategy::Wasm; -/// Default value for the `--execution-other` parameter. 
-pub const DEFAULT_EXECUTION_OTHER: ExecutionStrategy = ExecutionStrategy::Wasm; diff --git a/client/cli/src/commands/check_block_cmd.rs b/client/cli/src/commands/check_block_cmd.rs index ba20376d998a9..dc63d46bd19af 100644 --- a/client/cli/src/commands/check_block_cmd.rs +++ b/client/cli/src/commands/check_block_cmd.rs @@ -54,7 +54,6 @@ impl CheckBlockCmd { B: BlockT + for<'de> serde::Deserialize<'de>, C: BlockBackend + HeaderBackend + Send + Sync + 'static, IQ: sc_service::ImportQueue + 'static, - B::Hash: FromStr, ::Err: Debug, <::Number as FromStr>::Err: Debug, { diff --git a/client/cli/src/commands/export_state_cmd.rs b/client/cli/src/commands/export_state_cmd.rs index 45196c1192c84..4f9e625d702e6 100644 --- a/client/cli/src/commands/export_state_cmd.rs +++ b/client/cli/src/commands/export_state_cmd.rs @@ -59,7 +59,6 @@ impl ExportStateCmd { B: BlockT, C: UsageProvider + StorageProvider + HeaderBackend, BA: sc_client_api::backend::Backend, - B::Hash: FromStr, ::Err: Debug, <::Number as FromStr>::Err: Debug, { diff --git a/client/cli/src/commands/generate_node_key.rs b/client/cli/src/commands/generate_node_key.rs index c3579f7dc826e..43851dc1af5cc 100644 --- a/client/cli/src/commands/generate_node_key.rs +++ b/client/cli/src/commands/generate_node_key.rs @@ -55,7 +55,7 @@ impl GenerateNodeKeyCmd { let file_data = if self.bin { secret.as_ref().to_owned() } else { - array_bytes::bytes2hex("", secret.as_ref()).into_bytes() + array_bytes::bytes2hex("", secret).into_bytes() }; match &self.file { diff --git a/client/cli/src/commands/insert_key.rs b/client/cli/src/commands/insert_key.rs index fa9d125d33108..732d874319a83 100644 --- a/client/cli/src/commands/insert_key.rs +++ b/client/cli/src/commands/insert_key.rs @@ -125,10 +125,6 @@ mod tests { "test".into() } - fn native_runtime_version(_: &Box) -> &'static sp_version::RuntimeVersion { - unimplemented!("Not required in tests") - } - fn load_spec(&self, _: &str) -> std::result::Result, String> { 
Ok(Box::new(GenericChainSpec::from_genesis( "test", diff --git a/client/cli/src/commands/mod.rs b/client/cli/src/commands/mod.rs index d004fc1beb097..9d48d2bdf644f 100644 --- a/client/cli/src/commands/mod.rs +++ b/client/cli/src/commands/mod.rs @@ -15,6 +15,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . + +//! Various subcommands that can be included in a substrate-based chain's CLI. + mod build_spec_cmd; mod chain_info_cmd; mod check_block_cmd; diff --git a/client/cli/src/commands/revert_cmd.rs b/client/cli/src/commands/revert_cmd.rs index df5d93a7e944e..34e8c1036c59f 100644 --- a/client/cli/src/commands/revert_cmd.rs +++ b/client/cli/src/commands/revert_cmd.rs @@ -18,7 +18,7 @@ use crate::{ error, - params::{GenericNumber, PruningParams, SharedParams}, + params::{DatabaseParams, GenericNumber, PruningParams, SharedParams}, CliConfiguration, }; use clap::Parser; @@ -41,6 +41,10 @@ pub struct RevertCmd { #[allow(missing_docs)] #[clap(flatten)] pub pruning_params: PruningParams, + + #[allow(missing_docs)] + #[clap(flatten)] + pub database_params: DatabaseParams, } /// Revert handler for auxiliary data (e.g. consensus). @@ -79,4 +83,8 @@ impl CliConfiguration for RevertCmd { fn pruning_params(&self) -> Option<&PruningParams> { Some(&self.pruning_params) } + + fn database_params(&self) -> Option<&DatabaseParams> { + Some(&self.database_params) + } } diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 975f7638191ea..5dda488b13330 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -394,6 +394,11 @@ impl CliConfiguration for RunCmd { /// Check whether a node name is considered as valid. 
pub fn is_node_name_valid(_name: &str) -> std::result::Result<(), &str> { let name = _name.to_string(); + + if name.is_empty() { + return Err("Node name cannot be empty") + } + if name.chars().count() >= crate::NODE_NAME_MAX_LENGTH { return Err("Node name too long") } @@ -404,7 +409,7 @@ pub fn is_node_name_valid(_name: &str) -> std::result::Result<(), &str> { return Err("Node name should not contain invalid chars such as '.' and '@'") } - let invalid_patterns = r"(https?:\\/+)?(www)+"; + let invalid_patterns = r"^https?:"; let re = Regex::new(invalid_patterns).unwrap(); if re.is_match(&name) { return Err("Node name should not contain urls") @@ -490,18 +495,32 @@ mod tests { #[test] fn tests_node_name_good() { assert!(is_node_name_valid("short name").is_ok()); + assert!(is_node_name_valid("www").is_ok()); + assert!(is_node_name_valid("aawww").is_ok()); + assert!(is_node_name_valid("wwwaa").is_ok()); + assert!(is_node_name_valid("www aa").is_ok()); } #[test] fn tests_node_name_bad() { + assert!(is_node_name_valid("").is_err()); assert!(is_node_name_valid( "very very long names are really not very cool for the ui at all, really they're not" ) .is_err()); assert!(is_node_name_valid("Dots.not.Ok").is_err()); - assert!(is_node_name_valid("http://visit.me").is_err()); - assert!(is_node_name_valid("https://visit.me").is_err()); + // NOTE: the urls below don't include a domain otherwise + // they'd get filtered for including a `.` + assert!(is_node_name_valid("http://visitme").is_err()); + assert!(is_node_name_valid("http:/visitme").is_err()); + assert!(is_node_name_valid("http:visitme").is_err()); + assert!(is_node_name_valid("https://visitme").is_err()); + assert!(is_node_name_valid("https:/visitme").is_err()); + assert!(is_node_name_valid("https:visitme").is_err()); assert!(is_node_name_valid("www.visit.me").is_err()); + assert!(is_node_name_valid("www.visit").is_err()); + assert!(is_node_name_valid("hello\\world").is_err()); + 
assert!(is_node_name_valid("visit.www").is_err()); assert!(is_node_name_valid("email@domain").is_err()); } } diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 04c62a73b40ed..4d218da6aa898 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -24,7 +24,6 @@ use crate::{ }; use log::warn; use names::{Generator, Name}; -use sc_client_api::execution_extensions::ExecutionStrategies; use sc_service::{ config::{ BasePath, Configuration, DatabaseSource, KeystoreConfig, NetworkConfiguration, @@ -291,21 +290,6 @@ pub trait CliConfiguration: Sized { self.import_params().map(|x| x.wasm_runtime_overrides()).unwrap_or_default() } - /// Get the execution strategies. - /// - /// By default this is retrieved from `ImportParams` if it is available. Otherwise its - /// `ExecutionStrategies::default()`. - fn execution_strategies( - &self, - is_dev: bool, - is_validator: bool, - ) -> Result { - Ok(self - .import_params() - .map(|x| x.execution_strategies(is_dev, is_validator)) - .unwrap_or_default()) - } - /// Get the RPC address. fn rpc_addr(&self, _default_listen_port: u16) -> Result> { Ok(None) @@ -508,7 +492,6 @@ pub trait CliConfiguration: Sized { blocks_pruning: self.blocks_pruning()?, wasm_method: self.wasm_method()?, wasm_runtime_overrides: self.wasm_runtime_overrides(), - execution_strategies: self.execution_strategies(is_dev, is_validator)?, rpc_addr: self.rpc_addr(DCV::rpc_listen_port())?, rpc_methods: self.rpc_methods()?, rpc_max_connections: self.rpc_max_connections()?, diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index 5d451bbed6562..104e8ec8b798e 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -17,6 +17,8 @@ // along with this program. If not, see . //! Substrate CLI library. +//! +//! To see a full list of commands available, see [`commands`]. 
#![warn(missing_docs)] #![warn(unused_extern_crates)] @@ -26,7 +28,7 @@ use clap::{CommandFactory, FromArgMatches, Parser}; use sc_service::Configuration; pub mod arg_enums; -mod commands; +pub mod commands; mod config; mod error; mod params; @@ -248,6 +250,4 @@ pub trait SubstrateCli: Sized { command.init(&Self::support_url(), &Self::impl_version(), logger_hook, &config)?; Runner::new(config, tokio_runtime, signals) } - /// Native runtime version. - fn native_runtime_version(chain_spec: &Box) -> &'static RuntimeVersion; } diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index 9e57a017e51ca..bfa54a35058f6 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -19,15 +19,11 @@ use crate::{ arg_enums::{ ExecutionStrategy, WasmExecutionMethod, WasmtimeInstantiationStrategy, - DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, DEFAULT_EXECUTION_IMPORT_BLOCK, - DEFAULT_EXECUTION_IMPORT_BLOCK_VALIDATOR, DEFAULT_EXECUTION_OFFCHAIN_WORKER, - DEFAULT_EXECUTION_OTHER, DEFAULT_EXECUTION_SYNCING, DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, DEFAULT_WASM_EXECUTION_METHOD, }, params::{DatabaseParams, PruningParams}, }; use clap::Args; -use sc_client_api::execution_extensions::ExecutionStrategies; use std::path::PathBuf; /// Parameters for block import. 
@@ -104,6 +100,8 @@ impl ImportParams { /// Get the WASM execution method from the parameters pub fn wasm_method(&self) -> sc_service::config::WasmExecutionMethod { + self.execution_strategies.check_usage_and_print_deprecation_warning(); + crate::execution_method_from_cli(self.wasm_method, self.wasmtime_instantiation_strategy) } @@ -112,36 +110,6 @@ impl ImportParams { pub fn wasm_runtime_overrides(&self) -> Option { self.wasm_runtime_overrides.clone() } - - /// Get execution strategies for the parameters - pub fn execution_strategies(&self, is_dev: bool, is_validator: bool) -> ExecutionStrategies { - let exec = &self.execution_strategies; - let exec_all_or = |strat: Option, default: ExecutionStrategy| { - let default = if is_dev { ExecutionStrategy::Native } else { default }; - - exec.execution.unwrap_or_else(|| strat.unwrap_or(default)).into() - }; - - let default_execution_import_block = if is_validator { - DEFAULT_EXECUTION_IMPORT_BLOCK_VALIDATOR - } else { - DEFAULT_EXECUTION_IMPORT_BLOCK - }; - - ExecutionStrategies { - syncing: exec_all_or(exec.execution_syncing, DEFAULT_EXECUTION_SYNCING), - importing: exec_all_or(exec.execution_import_block, default_execution_import_block), - block_construction: exec_all_or( - exec.execution_block_construction, - DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, - ), - offchain_worker: exec_all_or( - exec.execution_offchain_worker, - DEFAULT_EXECUTION_OFFCHAIN_WORKER, - ), - other: exec_all_or(exec.execution_other, DEFAULT_EXECUTION_OTHER), - } - } } /// Execution strategies parameters. @@ -186,3 +154,23 @@ pub struct ExecutionStrategiesParams { )] pub execution: Option, } + +impl ExecutionStrategiesParams { + /// Check if one of the parameters is still passed and print a warning if so. 
+ fn check_usage_and_print_deprecation_warning(&self) { + for (param, name) in [ + (&self.execution_syncing, "execution-syncing"), + (&self.execution_import_block, "execution-import-block"), + (&self.execution_block_construction, "execution-block-construction"), + (&self.execution_offchain_worker, "execution-offchain-worker"), + (&self.execution_other, "execution-other"), + (&self.execution, "execution"), + ] { + if param.is_some() { + eprintln!( + "CLI parameter `--{name}` has no effect anymore and will be removed in the future!" + ); + } + } + } +} diff --git a/client/cli/src/params/mod.rs b/client/cli/src/params/mod.rs index 247ffc0e04ba5..a73bd8844fec4 100644 --- a/client/cli/src/params/mod.rs +++ b/client/cli/src/params/mod.rs @@ -113,7 +113,6 @@ impl BlockNumberOrHash { /// Parse the inner value as `BlockId`. pub fn parse(&self) -> Result, String> where - B::Hash: FromStr, ::Err: std::fmt::Debug, NumberFor: FromStr, as FromStr>::Err: std::fmt::Debug, diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index a974b86026116..84db218cc51da 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -28,7 +28,7 @@ use sc_service::{ config::{Multiaddr, MultiaddrWithPeerId}, ChainSpec, ChainType, }; -use std::{borrow::Cow, path::PathBuf}; +use std::{borrow::Cow, num::NonZeroUsize, path::PathBuf}; /// Parameters used to create the network configuration. #[derive(Debug, Clone, Args)] @@ -121,6 +121,13 @@ pub struct NetworkParams { #[arg(long)] pub kademlia_disjoint_query_paths: bool, + /// Kademlia replication factor determines to how many closest peers a record is replicated to. + /// + /// Discovery mechanism requires successful replication to all + /// `kademlia_replication_factor` peers to consider record successfully put. + #[arg(long, default_value = "20")] + pub kademlia_replication_factor: NonZeroUsize, + /// Join the IPFS network and serve transactions over bitswap protocol. 
#[arg(long)] pub ipfs_server: bool, @@ -233,6 +240,7 @@ impl NetworkParams { enable_dht_random_walk: !self.reserved_only, allow_non_globals_in_dht, kademlia_disjoint_query_paths: self.kademlia_disjoint_query_paths, + kademlia_replication_factor: self.kademlia_replication_factor, yamux_window_size: None, ipfs_server: self.ipfs_server, sync_mode: self.sync.into(), diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index 7b534b37192ad..59f53200a192b 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -187,23 +187,18 @@ pub fn print_node_infos(config: &Configuration) { .path() .map_or_else(|| "".to_owned(), |p| p.display().to_string()) ); - info!("⛓ Native runtime: {}", C::native_runtime_version(&config.chain_spec)); } #[cfg(test)] mod tests { + use super::*; + use sc_network::config::NetworkConfiguration; + use sc_service::{Arc, ChainType, GenericChainSpec, NoExtension}; use std::{ path::PathBuf, sync::atomic::{AtomicU64, Ordering}, }; - use sc_network::config::NetworkConfiguration; - use sc_service::{Arc, ChainType, GenericChainSpec, NoExtension}; - use sp_runtime::create_runtime_str; - use sp_version::create_apis_vec; - - use super::*; - struct Cli; impl SubstrateCli for Cli { @@ -237,23 +232,6 @@ mod tests { ) -> std::result::Result, String> { Err("nope".into()) } - - fn native_runtime_version( - _: &Box, - ) -> &'static sp_version::RuntimeVersion { - const VERSION: sp_version::RuntimeVersion = sp_version::RuntimeVersion { - spec_name: create_runtime_str!("spec"), - impl_name: create_runtime_str!("name"), - authoring_version: 0, - spec_version: 0, - impl_version: 0, - apis: create_apis_vec!([]), - transaction_version: 2, - state_version: 0, - }; - - &VERSION - } } fn create_runner() -> Runner { @@ -287,7 +265,6 @@ mod tests { )), wasm_method: Default::default(), wasm_runtime_overrides: None, - execution_strategies: Default::default(), rpc_addr: None, rpc_max_connections: Default::default(), rpc_cors: None, @@ -423,7 +400,7 @@ mod 
tests { }, ); - let Some(output) = output else { return } ; + let Some(output) = output else { return }; let stderr = dbg!(String::from_utf8(output.stderr).unwrap()); diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 4c0305e9f66e7..70581e8c80d5e 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.57" -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" log = "0.4.17" thiserror = "1.0" @@ -25,16 +25,16 @@ sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/commo sc-consensus-slots = { version = "0.10.0-dev", path = "../slots" } sc-telemetry = { version = "4.0.0-dev", path = "../../telemetry" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } -sp-application-crypto = { version = "7.0.0", path = "../../../primitives/application-crypto" } +sp-application-crypto = { version = "23.0.0", path = "../../../primitives/application-crypto" } sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-consensus-aura = { version = "0.10.0-dev", path = "../../../primitives/consensus/aura" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-keystore = { version = "0.27.0", path = 
"../../../primitives/keystore" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } [dev-dependencies] parking_lot = "0.12.1" @@ -42,8 +42,8 @@ tempfile = "3.1.0" sc-keystore = { version = "4.0.0-dev", path = "../../keystore" } sc-network = { version = "0.10.0-dev", path = "../../network" } sc-network-test = { version = "0.8.0", path = "../../network/test" } -sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } +sp-keyring = { version = "24.0.0", path = "../../../primitives/keyring" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } -sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } tokio = { version = "1.22.0" } diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index ef7a2a1cc865b..a8777ef8788cc 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -22,7 +22,7 @@ use crate::{ authorities, standalone::SealVerificationError, AuthorityId, CompatibilityMode, Error, LOG_TARGET, }; -use codec::{Codec, Decode, Encode}; +use codec::Codec; use log::{debug, info, trace}; use prometheus_endpoint::Registry; use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; @@ -38,13 +38,13 @@ use sp_blockchain::HeaderBackend; use sp_consensus::Error as ConsensusError; use sp_consensus_aura::{inherents::AuraInherentData, AuraApi}; use sp_consensus_slots::Slot; -use sp_core::{crypto::Pair, ExecutionContext}; +use sp_core::crypto::Pair; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider as _}; use sp_runtime::{ traits::{Block as BlockT, Header, NumberFor}, DigestItem, }; -use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; +use std::{fmt::Debug, marker::PhantomData, sync::Arc}; /// 
check a header has been signed by the right key. If the slot is too far in the future, an error /// will be returned. If it's successful, returns the pre-header and the digest item @@ -60,9 +60,9 @@ fn check_header( check_for_equivocation: CheckForEquivocation, ) -> Result, Error> where + P::Public: Codec, P::Signature: Codec, C: sc_client_api::backend::AuxStore, - P::Public: Encode + Decode + PartialEq + Clone, { let check_result = crate::standalone::check_header_slot_and_seal::(slot_now, header, authorities); @@ -101,11 +101,11 @@ where /// A verifier for Aura blocks. pub struct AuraVerifier { client: Arc, - phantom: PhantomData

, create_inherent_data_providers: CIDP, check_for_equivocation: CheckForEquivocation, telemetry: Option, compatibility_mode: CompatibilityMode, + _phantom: PhantomData P>, } impl AuraVerifier { @@ -122,14 +122,13 @@ impl AuraVerifier { check_for_equivocation, telemetry, compatibility_mode, - phantom: PhantomData, + _phantom: PhantomData, } } } impl AuraVerifier where - P: Send + Sync + 'static, CIDP: Send, { async fn check_inherents( @@ -138,7 +137,6 @@ where at_hash: B::Hash, inherent_data: sp_inherents::InherentData, create_inherent_data_providers: CIDP::InherentDataProviders, - execution_context: ExecutionContext, ) -> Result<(), Error> where C: ProvideRuntimeApi, @@ -148,7 +146,7 @@ where let inherent_res = self .client .runtime_api() - .check_inherents_with_context(at_hash, execution_context, block, inherent_data) + .check_inherents(at_hash, block, inherent_data) .map_err(|e| Error::Client(e.into()))?; if !inherent_res.ok() { @@ -169,16 +167,16 @@ impl Verifier for AuraVerifier + Send + Sync + sc_client_api::backend::AuxStore, C::Api: BlockBuilderApi + AuraApi> + ApiExt, - P: Pair + Send + Sync + 'static, - P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, - P::Signature: Encode + Decode, + P: Pair, + P::Public: Codec + Debug, + P::Signature: Codec, CIDP: CreateInherentDataProviders + Send + Sync, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { async fn verify( &mut self, - mut block: BlockImportParams, - ) -> Result, String> { + mut block: BlockImportParams, + ) -> Result, String> { // Skip checks that include execution, if being told so or when importing only state. 
// // This is done for example when gap syncing and it is expected that the block after the gap @@ -249,7 +247,6 @@ where parent_hash, inherent_data, create_inherent_data_providers, - block.origin.into(), ) .await .map_err(|e| e.to_string())?; @@ -351,7 +348,7 @@ pub fn import_queue( telemetry, compatibility_mode, }: ImportQueueParams, -) -> Result, sp_consensus::Error> +) -> Result, sp_consensus::Error> where Block: BlockT, C::Api: BlockBuilderApi + AuraApi> + ApiExt, @@ -363,13 +360,10 @@ where + AuxStore + UsageProvider + HeaderBackend, - I: BlockImport> - + Send - + Sync - + 'static, - P: Pair + Send + Sync + 'static, - P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, - P::Signature: Encode + Decode, + I: BlockImport + Send + Sync + 'static, + P: Pair + 'static, + P::Public: Codec + Debug, + P::Signature: Codec, S: sp_core::traits::SpawnEssentialNamed, CIDP: CreateInherentDataProviders + Sync + Send + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 1dc364283d5b6..a77f00d08d3e0 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -30,12 +30,11 @@ //! //! NOTE: Aura itself is designed to be generic over the crypto used. 
#![forbid(missing_docs, unsafe_code)] -use std::{fmt::Debug, hash::Hash, marker::PhantomData, pin::Pin, sync::Arc}; +use std::{fmt::Debug, marker::PhantomData, pin::Pin, sync::Arc}; +use codec::Codec; use futures::prelude::*; -use codec::{Codec, Decode, Encode}; - use sc_client_api::{backend::AuxStore, BlockOf}; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; use sc_consensus_slots::{ @@ -48,7 +47,7 @@ use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; use sp_consensus::{BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain}; use sp_consensus_slots::Slot; -use sp_core::crypto::{Pair, Public}; +use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header, Member, NumberFor}; @@ -172,16 +171,16 @@ pub fn start_aura( }: StartAuraParams>, ) -> Result, ConsensusError> where - P: Pair + Send + Sync, - P::Public: AppPublic + Hash + Member + Encode + Decode, - P::Signature: TryFrom> + Hash + Member + Encode + Decode, + P: Pair, + P::Public: AppPublic + Member, + P::Signature: TryFrom> + Member + Codec, B: BlockT, C: ProvideRuntimeApi + BlockOf + AuxStore + HeaderBackend + Send + Sync, C::Api: AuraApi>, SC: SelectChain, - I: BlockImport> + Send + Sync + 'static, + I: BlockImport + Send + Sync + 'static, PF: Environment + Send + Sync + 'static, - PF::Proposer: Proposer>, + PF::Proposer: Proposer, SO: SyncOracle + Send + Sync + Clone, L: sc_consensus::JustificationSyncLink, CIDP: CreateInherentDataProviders + Send + 'static, @@ -280,11 +279,11 @@ where C: ProvideRuntimeApi + BlockOf + AuxStore + HeaderBackend + Send + Sync, C::Api: AuraApi>, PF: Environment + Send + Sync + 'static, - PF::Proposer: Proposer>, - P: Pair + Send + Sync, - P::Public: AppPublic + Hash + Member + Encode + Decode, - P::Signature: TryFrom> + Hash + Member + Encode + Decode, - I: BlockImport> + Send + Sync + 'static, + PF::Proposer: 
Proposer, + P: Pair, + P::Public: AppPublic + Member, + P::Signature: TryFrom> + Member + Codec, + I: BlockImport + Send + Sync + 'static, Error: std::error::Error + Send + From + 'static, SO: SyncOracle + Send + Sync + Clone, L: sc_consensus::JustificationSyncLink, @@ -303,7 +302,7 @@ where block_proposal_slot_portion, max_block_proposal_slot_portion, compatibility_mode, - _key_type: PhantomData::

, + _phantom: PhantomData:: P>, } } @@ -320,7 +319,7 @@ struct AuraWorker { max_block_proposal_slot_portion: Option, telemetry: Option, compatibility_mode: CompatibilityMode, - _key_type: PhantomData

, + _phantom: PhantomData P>, } #[async_trait::async_trait] @@ -331,11 +330,11 @@ where C: ProvideRuntimeApi + BlockOf + HeaderBackend + Sync, C::Api: AuraApi>, E: Environment + Send + Sync, - E::Proposer: Proposer>, - I: BlockImport> + Send + Sync + 'static, - P: Pair + Send + Sync, - P::Public: AppPublic + Public + Member + Encode + Decode + Hash, - P::Signature: TryFrom> + Member + Encode + Decode + Hash + Debug, + E::Proposer: Proposer, + I: BlockImport + Send + Sync + 'static, + P: Pair, + P::Public: AppPublic + Member, + P::Signature: TryFrom> + Member + Codec, SO: SyncOracle + Send + Clone + Sync, L: sc_consensus::JustificationSyncLink, BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, @@ -389,13 +388,10 @@ where header: B::Header, header_hash: &B::Hash, body: Vec, - storage_changes: StorageChanges<>::Transaction, B>, + storage_changes: StorageChanges, public: Self::Claim, _authorities: Self::AuxData, - ) -> Result< - sc_consensus::BlockImportParams>::Transaction>, - ConsensusError, - > { + ) -> Result, ConsensusError> { let signature_digest_item = crate::standalone::seal::<_, P>(header_hash, &public, &self.keystore)?; @@ -597,9 +593,7 @@ mod tests { impl Proposer for DummyProposer { type Error = Error; - type Transaction = - sc_client_api::TransactionFor; - type Proposal = future::Ready, Error>>; + type Proposal = future::Ready, Error>>; type ProofRecording = DisableProofRecording; type Proof = (); @@ -806,10 +800,10 @@ mod tests { force_authoring: false, backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), telemetry: None, - _key_type: PhantomData::, block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, compatibility_mode: Default::default(), + _phantom: PhantomData:: AuthorityPair>, }; let head = Header::new( @@ -856,10 +850,10 @@ mod tests { force_authoring: false, backoff_authoring_blocks: Option::<()>::None, telemetry: None, - _key_type: PhantomData::, 
block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, compatibility_mode: Default::default(), + _phantom: PhantomData:: AuthorityPair>, }; let head = client.expect_header(client.info().genesis_hash).unwrap(); diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 2382d064d0219..e036ff1e64cd1 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.57" scale-info = { version = "2.5.0", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" log = "0.4.17" num-bigint = "0.4.3" @@ -30,27 +30,27 @@ sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-consensus-epochs = { version = "0.10.0-dev", path = "../epochs" } sc-consensus-slots = { version = "0.10.0-dev", path = "../slots" } -sc-keystore = { version = "4.0.0-dev", path = "../../keystore" } sc-telemetry = { version = "4.0.0-dev", path = "../../telemetry" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../transaction-pool/api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } -sp-application-crypto = { version = "7.0.0", path = "../../../primitives/application-crypto" } +sp-application-crypto = { version = "23.0.0", path = "../../../primitives/application-crypto" } sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } sp-consensus-slots = { 
version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-keystore = { version = "0.27.0", path = "../../../primitives/keystore" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } [dev-dependencies] rand_chacha = "0.2.2" sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } -sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } +sp-keyring = { version = "24.0.0", path = "../../../primitives/keyring" } sc-network = { version = "0.10.0-dev", path = "../../network" } sc-network-test = { version = "0.8.0", path = "../../network/test" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } -sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } tokio = "1.22.0" diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index f81a14a97c758..7b16ea84c4366 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -15,24 +15,25 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } futures = "0.3.21" -serde = { version = "1.0.136", features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } thiserror = "1.0" sc-consensus-babe = { version = "0.10.0-dev", path = "../" } sc-consensus-epochs = { version = "0.10.0-dev", path = "../../epochs" } 
sc-rpc-api = { version = "0.10.0-dev", path = "../../../rpc-api" } sp-api = { version = "4.0.0-dev", path = "../../../../primitives/api" } -sp-application-crypto = { version = "7.0.0", path = "../../../../primitives/application-crypto" } +sp-application-crypto = { version = "23.0.0", path = "../../../../primitives/application-crypto" } sp-blockchain = { version = "4.0.0-dev", path = "../../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../../primitives/consensus/common" } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../../primitives/consensus/babe" } -sp-core = { version = "7.0.0", path = "../../../../primitives/core" } -sp-keystore = { version = "0.13.0", path = "../../../../primitives/keystore" } -sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../../../primitives/core" } +sp-keystore = { version = "0.27.0", path = "../../../../primitives/keystore" } +sp-runtime = { version = "24.0.0", path = "../../../../primitives/runtime" } [dev-dependencies] serde_json = "1.0.85" tokio = "1.22.0" sc-consensus = { version = "0.10.0-dev", path = "../../../consensus/common" } sc-keystore = { version = "4.0.0-dev", path = "../../../keystore" } -sp-keyring = { version = "7.0.0", path = "../../../../primitives/keyring" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../transaction-pool/api" } +sp-keyring = { version = "24.0.0", path = "../../../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index 1ae15cc5453d7..bffe026ea6ef6 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -186,6 +186,8 @@ impl From for JsonRpseeError { #[cfg(test)] mod tests { use super::*; + use sc_consensus_babe::ImportQueueParams; + use 
sc_transaction_pool_api::{OffchainTransactionPoolFactory, RejectAllTxPool}; use sp_consensus_babe::inherents::InherentDataProvider; use sp_core::{crypto::key_types::BABE, testing::TaskExecutor}; use sp_keyring::Sr25519Keyring; @@ -219,22 +221,25 @@ mod tests { sc_consensus_babe::block_import(config.clone(), client.clone(), client.clone()) .expect("can initialize block-import"); - let (_, babe_worker_handle) = sc_consensus_babe::import_queue( - link.clone(), - block_import.clone(), - None, - client.clone(), - longest_chain.clone(), - move |_, _| async move { + let (_, babe_worker_handle) = sc_consensus_babe::import_queue(ImportQueueParams { + link: link.clone(), + block_import: block_import.clone(), + justification_import: None, + client: client.clone(), + select_chain: longest_chain.clone(), + create_inherent_data_providers: move |_, _| async move { Ok((InherentDataProvider::from_timestamp_and_slot_duration( 0.into(), slot_duration, ),)) }, - &task_executor, - None, - None, - ) + spawner: &task_executor, + registry: None, + telemetry: None, + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new( + RejectAllTxPool::default(), + ), + }) .unwrap(); Babe::new(client.clone(), babe_worker_handle, keystore, longest_chain, deny_unsafe) diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 219b52294952a..b89fa8f5df65e 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -106,6 +106,7 @@ use sc_consensus_slots::{ SlotInfo, StorageChanges, }; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_application_crypto::AppCrypto; use sp_block_builder::BlockBuilder as BlockBuilderApi; @@ -116,7 +117,7 @@ use sp_blockchain::{ use sp_consensus::{BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain}; use sp_consensus_babe::inherents::BabeInherentData; 
use sp_consensus_slots::Slot; -use sp_core::ExecutionContext; +use sp_core::traits::SpawnEssentialNamed; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; use sp_keystore::KeystorePtr; use sp_runtime::{ @@ -491,11 +492,8 @@ where C::Api: BabeApi, SC: SelectChain + 'static, E: Environment + Send + Sync + 'static, - E::Proposer: Proposer>, - I: BlockImport> - + Send - + Sync - + 'static, + E::Proposer: Proposer, + I: BlockImport + Send + Sync + 'static, SO: SyncOracle + Send + Sync + Clone + 'static, L: sc_consensus::JustificationSyncLink + 'static, CIDP: CreateInherentDataProviders + Send + Sync + 'static, @@ -726,8 +724,8 @@ where C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, C::Api: BabeApi, E: Environment + Sync, - E::Proposer: Proposer>, - I: BlockImport> + Send + Sync + 'static, + E::Proposer: Proposer, + I: BlockImport + Send + Sync + 'static, SO: SyncOracle + Send + Clone + Sync, L: sc_consensus::JustificationSyncLink, BS: BackoffAuthoringBlocksStrategy> + Sync, @@ -821,13 +819,10 @@ where header: B::Header, header_hash: &B::Hash, body: Vec, - storage_changes: StorageChanges<>::Transaction, B>, + storage_changes: StorageChanges, (_, public): Self::Claim, epoch_descriptor: Self::AuxData, - ) -> Result< - BlockImportParams>::Transaction>, - ConsensusError, - > { + ) -> Result, ConsensusError> { let signature = self .keystore .sr25519_sign(::ID, public.as_ref(), header_hash.as_ref()) @@ -992,6 +987,7 @@ pub struct BabeVerifier { config: BabeConfiguration, epoch_changes: SharedEpochChanges, telemetry: Option, + offchain_tx_pool_factory: OffchainTransactionPoolFactory, } impl BabeVerifier @@ -1008,12 +1004,11 @@ where at_hash: Block::Hash, inherent_data: InherentData, create_inherent_data_providers: CIDP::InherentDataProviders, - execution_context: ExecutionContext, ) -> Result<(), Error> { let inherent_res = self .client .runtime_api() - .check_inherents_with_context(at_hash, execution_context, block, inherent_data) + 
.check_inherents(at_hash, block, inherent_data) .map_err(Error::RuntimeApi)?; if !inherent_res.ok() { @@ -1098,8 +1093,13 @@ where }; // submit equivocation report at best block. - self.client - .runtime_api() + let mut runtime_api = self.client.runtime_api(); + + // Register the offchain tx pool to be able to use it from the runtime. + runtime_api + .register_extension(self.offchain_tx_pool_factory.offchain_transaction_pool(best_hash)); + + runtime_api .submit_report_equivocation_unsigned_extrinsic( best_hash, equivocation_proof, @@ -1131,8 +1131,8 @@ where { async fn verify( &mut self, - mut block: BlockImportParams, - ) -> Result, String> { + mut block: BlockImportParams, + ) -> Result, String> { trace!( target: LOG_TARGET, "Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", @@ -1152,10 +1152,10 @@ where // Verification for imported blocks is skipped in two cases: // 1. When importing blocks below the last finalized block during network initial // synchronization. - // 2. When importing whole state we don't calculate epoch descriptor, but rather - // read it from the state after import. We also skip all verifications - // because there's no parent state and we trust the sync module to verify - // that the state is correct and finalized. + // 2. When importing whole state we don't calculate epoch descriptor, but rather read it + // from the state after import. We also skip all verifications because there's no + // parent state and we trust the sync module to verify that the state is correct and + // finalized. 
return Ok(block) } @@ -1250,7 +1250,6 @@ where parent_hash, inherent_data, create_inherent_data_providers, - block.origin.into(), ) .await?; } @@ -1331,7 +1330,7 @@ impl BabeBlockImport { impl BabeBlockImport where Block: BlockT, - Inner: BlockImport> + Send + Sync, + Inner: BlockImport + Send + Sync, Inner::Error: Into, Client: HeaderBackend + HeaderMetadata @@ -1346,7 +1345,7 @@ where // end up in an inconsistent state and have to resync. async fn import_state( &mut self, - mut block: BlockImportParams>, + mut block: BlockImportParams, ) -> Result { let hash = block.post_hash(); let parent_hash = *block.header.parent_hash(); @@ -1395,7 +1394,7 @@ where impl BlockImport for BabeBlockImport where Block: BlockT, - Inner: BlockImport> + Send + Sync, + Inner: BlockImport + Send + Sync, Inner::Error: Into, Client: HeaderBackend + HeaderMetadata @@ -1406,11 +1405,10 @@ where Client::Api: BabeApi + ApiExt, { type Error = ConsensusError; - type Transaction = sp_api::TransactionFor; async fn import_block( &mut self, - mut block: BlockImportParams, + mut block: BlockImportParams, ) -> Result { let hash = block.post_hash(); let number = *block.header.number(); @@ -1768,6 +1766,38 @@ where Ok((import, link)) } +/// Parameters passed to [`import_queue`]. +pub struct ImportQueueParams<'a, Block: BlockT, BI, Client, CIDP, SelectChain, Spawn> { + /// The BABE link that is created by [`block_import`]. + pub link: BabeLink, + /// The block import that should be wrapped. + pub block_import: BI, + /// Optional justification import. + pub justification_import: Option>, + /// The client to interact with the internals of the node. + pub client: Arc, + /// A [`SelectChain`] implementation. + /// + /// Used to determine the best block that should be used as basis when sending an equivocation + /// report. + pub select_chain: SelectChain, + /// Used to create the inherent data providers.
+ /// + /// These inherent data providers are then used to create the inherent data that is + /// passed to the `check_inherents` runtime call. + pub create_inherent_data_providers: CIDP, + /// Spawner for spawning futures. + pub spawner: &'a Spawn, + /// Registry for prometheus metrics. + pub registry: Option<&'a Registry>, + /// Optional telemetry handle to report telemetry events. + pub telemetry: Option, + /// The offchain transaction pool factory. + /// + /// Will be used when sending equivocation reports. + pub offchain_tx_pool_factory: OffchainTransactionPoolFactory, +} + /// Start an import queue for the BABE consensus algorithm. /// /// This method returns the import queue, some data that needs to be passed to the block authoring @@ -1777,25 +1807,22 @@ where /// /// The block import object provided must be the `BabeBlockImport` or a wrapper /// of it, otherwise crucial import logic will be omitted. -pub fn import_queue( - babe_link: BabeLink, - block_import: Inner, - justification_import: Option>, - client: Arc, - select_chain: SelectChain, - create_inherent_data_providers: CIDP, - spawner: &impl sp_core::traits::SpawnEssentialNamed, - registry: Option<&Registry>, - telemetry: Option, -) -> ClientResult<(DefaultImportQueue, BabeWorkerHandle)> +pub fn import_queue( + ImportQueueParams { + link: babe_link, + block_import, + justification_import, + client, + select_chain, + create_inherent_data_providers, + spawner, + registry, + telemetry, + offchain_tx_pool_factory, + }: ImportQueueParams<'_, Block, BI, Client, CIDP, SelectChain, Spawn>, +) -> ClientResult<(DefaultImportQueue, BabeWorkerHandle)> where - Inner: BlockImport< - Block, - Error = ConsensusError, - Transaction = sp_api::TransactionFor, - > + Send - + Sync - + 'static, + BI: BlockImport + Send + Sync + 'static, Client: ProvideRuntimeApi + HeaderBackend + HeaderMetadata @@ -1807,6 +1834,7 @@ where SelectChain: sp_consensus::SelectChain + 'static, CIDP: CreateInherentDataProviders + Send + Sync + 
'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, + Spawn: SpawnEssentialNamed, { const HANDLE_BUFFER_SIZE: usize = 1024; @@ -1817,6 +1845,7 @@ where epoch_changes: babe_link.epoch_changes.clone(), telemetry, client: client.clone(), + offchain_tx_pool_factory, }; let (worker_tx, worker_rx) = channel(HANDLE_BUFFER_SIZE); diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 59b4076e2fd01..b3843f8acfa0a 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -21,11 +21,12 @@ use super::*; use authorship::claim_slot; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; -use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer}; +use sc_client_api::{BlockchainEvents, Finalizer}; use sc_consensus::{BoxBlockImport, BoxJustificationImport}; use sc_consensus_epochs::{EpochIdentifier, EpochIdentifierPosition}; use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_network_test::{Block as TestBlock, *}; +use sc_transaction_pool_api::RejectAllTxPool; use sp_application_crypto::key_types::BABE; use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; use sp_consensus_babe::{ @@ -96,16 +97,7 @@ impl DummyProposer { fn propose_with( &mut self, pre_digests: Digest, - ) -> future::Ready< - Result< - Proposal< - TestBlock, - sc_client_api::TransactionFor, - (), - >, - Error, - >, - > { + ) -> future::Ready, Error>> { let block_builder = self.factory.client.new_block_at(self.parent_hash, pre_digests, false).unwrap(); @@ -123,9 +115,7 @@ impl DummyProposer { impl Proposer for DummyProposer { type Error = Error; - type Transaction = - sc_client_api::TransactionFor; - type Proposal = future::Ready, Error>>; + type Proposal = future::Ready, Error>>; type ProofRecording = DisableProofRecording; type Proof = (); @@ -150,15 +140,13 @@ pub struct PanickingBlockImport(B); #[async_trait::async_trait] impl> BlockImport for 
PanickingBlockImport where - B::Transaction: Send, B: Send, { type Error = B::Error; - type Transaction = B::Transaction; async fn import_block( &mut self, - block: BlockImportParams, + block: BlockImportParams, ) -> Result { Ok(self.0.import_block(block).await.expect("importing block failed")) } @@ -206,8 +194,8 @@ impl Verifier for TestVerifier { /// presented to the User in the logs. async fn verify( &mut self, - mut block: BlockImportParams, - ) -> Result, String> { + mut block: BlockImportParams, + ) -> Result, String> { // apply post-sealing mutations (i.e. stripping seal, if desired). (self.mutator)(&mut block.header, Stage::PostSeal); self.inner.verify(block).await @@ -216,14 +204,7 @@ impl Verifier for TestVerifier { pub struct PeerData { link: BabeLink, - block_import: Mutex< - Option< - BoxBlockImport< - TestBlock, - TransactionFor, - >, - >, - >, + block_import: Mutex>>, } impl TestNetFactory for BabeTestNet { @@ -248,7 +229,7 @@ impl TestNetFactory for BabeTestNet { let block_import = PanickingBlockImport(block_import); let data_block_import = - Mutex::new(Some(Box::new(block_import.clone()) as BoxBlockImport<_, _>)); + Mutex::new(Some(Box::new(block_import.clone()) as BoxBlockImport<_>)); ( BlockImportAdapter::new(block_import), None, @@ -283,6 +264,9 @@ impl TestNetFactory for BabeTestNet { config: data.link.config.clone(), epoch_changes: data.link.epoch_changes.clone(), telemetry: None, + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new( + RejectAllTxPool::default(), + ), }, mutator: MUTATOR.with(|m| m.borrow().clone()), } @@ -626,11 +610,11 @@ fn claim_vrf_check() { } // Propose and import a new BABE block on top of the given parent. 
-async fn propose_and_import_block( +async fn propose_and_import_block( parent: &TestHeader, slot: Option, proposer_factory: &mut DummyFactory, - block_import: &mut BoxBlockImport, + block_import: &mut BoxBlockImport, ) -> Hash { let mut proposer = proposer_factory.init(parent).await.unwrap(); @@ -697,10 +681,10 @@ async fn propose_and_import_block( // Propose and import n valid BABE blocks that are built on top of the given parent. // The proposer takes care of producing epoch change digests according to the epoch // duration (which is set to 6 slots in the test runtime). -async fn propose_and_import_blocks( +async fn propose_and_import_blocks( client: &PeersFullClient, proposer_factory: &mut DummyFactory, - block_import: &mut BoxBlockImport, + block_import: &mut BoxBlockImport, parent_hash: Hash, n: usize, ) -> Vec { diff --git a/client/consensus/beefy/Cargo.toml b/client/consensus/beefy/Cargo.toml index 161d53777ebc1..aec605c6bf11b 100644 --- a/client/consensus/beefy/Cargo.toml +++ b/client/consensus/beefy/Cargo.toml @@ -9,9 +9,10 @@ description = "BEEFY Client gadget for substrate" homepage = "https://substrate.io" [dependencies] -array-bytes = "4.1" +array-bytes = "6.1" +async-channel = "1.8.0" async-trait = "0.1.57" -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } fnv = "1.0.6" futures = "0.3" log = "0.4" @@ -21,30 +22,28 @@ wasm-timer = "0.2.5" prometheus = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } -sc-keystore = { version = "4.0.0-dev", path = "../../keystore" } sc-network = { version = "0.10.0-dev", path = "../../network" } -sc-network-common = { version = "0.10.0-dev", path = "../../network/common" } sc-network-gossip = { version = 
"0.10.0-dev", path = "../../network-gossip" } sc-network-sync = { version = "0.10.0-dev", path = "../../network/sync" } sc-utils = { version = "4.0.0-dev", path = "../../utils" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } -sp-application-crypto = { version = "7.0.0", path = "../../../primitives/application-crypto" } -sp-arithmetic = { version = "6.0.0", path = "../../../primitives/arithmetic" } +sp-application-crypto = { version = "23.0.0", path = "../../../primitives/application-crypto" } +sp-arithmetic = { version = "16.0.0", path = "../../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-consensus-beefy = { version = "4.0.0-dev", path = "../../../primitives/consensus/beefy" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } -sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-keystore = { version = "0.27.0", path = "../../../primitives/keystore" } sp-mmr-primitives = { version = "4.0.0-dev", path = "../../../primitives/merkle-mountain-range" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } [dev-dependencies] -serde = "1.0.136" +serde = "1.0.163" tempfile = "3.1.0" tokio = "1.22.0" sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sc-network-test = { version = "0.8.0", path = "../../network/test" } sp-consensus-grandpa = { version = "4.0.0-dev", path = "../../../primitives/consensus/grandpa" } -sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } -sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" } +sp-keyring = { version = "24.0.0", path = "../../../primitives/keyring" } +sp-tracing = { version = "10.0.0", 
path = "../../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/beefy/rpc/Cargo.toml b/client/consensus/beefy/rpc/Cargo.toml index d6dfa8731a3be..4f6e0d8c84b6b 100644 --- a/client/consensus/beefy/rpc/Cargo.toml +++ b/client/consensus/beefy/rpc/Cargo.toml @@ -9,18 +9,18 @@ description = "RPC for the BEEFY Client gadget for substrate" homepage = "https://substrate.io" [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } log = "0.4" parking_lot = "0.12.1" -serde = { version = "1.0.136", features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } thiserror = "1.0" sc-consensus-beefy = { version = "4.0.0-dev", path = "../" } sp-consensus-beefy = { version = "4.0.0-dev", path = "../../../../primitives/consensus/beefy" } sc-rpc = { version = "4.0.0-dev", path = "../../../rpc" } -sp-core = { version = "7.0.0", path = "../../../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../../../primitives/runtime" } [dev-dependencies] serde_json = "1.0.85" diff --git a/client/consensus/beefy/src/aux_schema.rs b/client/consensus/beefy/src/aux_schema.rs index 84186140b6925..409eb30d09ab9 100644 --- a/client/consensus/beefy/src/aux_schema.rs +++ b/client/consensus/beefy/src/aux_schema.rs @@ -28,7 +28,7 @@ use sp_runtime::traits::Block as BlockT; const VERSION_KEY: &[u8] = b"beefy_auxschema_version"; const WORKER_STATE_KEY: &[u8] = b"beefy_voter_state"; -const CURRENT_VERSION: u32 = 3; +const CURRENT_VERSION: u32 = 4; pub(crate) fn write_current_version(backend: &BE) 
-> ClientResult<()> { info!(target: LOG_TARGET, "🥩 write aux schema version {:?}", CURRENT_VERSION); @@ -63,8 +63,8 @@ where match version { None => (), - Some(1) | Some(2) => (), // versions 1 & 2 are obsolete and should be simply ignored - Some(3) => return load_decode::<_, PersistedState>(backend, WORKER_STATE_KEY), + Some(1) | Some(2) | Some(3) => (), // versions 1, 2 & 3 are obsolete and should be ignored + Some(4) => return load_decode::<_, PersistedState>(backend, WORKER_STATE_KEY), other => return Err(ClientError::Backend(format!("Unsupported BEEFY DB version: {:?}", other))), } diff --git a/client/consensus/beefy/src/communication/gossip.rs b/client/consensus/beefy/src/communication/gossip.rs index 9be648f8796c3..8c025ca067619 100644 --- a/client/consensus/beefy/src/communication/gossip.rs +++ b/client/consensus/beefy/src/communication/gossip.rs @@ -23,7 +23,7 @@ use sc_network_gossip::{MessageIntent, ValidationResult, Validator, ValidatorCon use sp_core::hashing::twox_64; use sp_runtime::traits::{Block, Hash, Header, NumberFor}; -use codec::{Decode, Encode}; +use codec::{Decode, DecodeAll, Encode}; use log::{debug, trace}; use parking_lot::{Mutex, RwLock}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -41,7 +41,7 @@ use crate::{ LOG_TARGET, }; use sp_consensus_beefy::{ - crypto::{AuthorityId, Signature}, + ecdsa_crypto::{AuthorityId, Signature}, ValidatorSet, ValidatorSetId, VoteMessage, }; @@ -374,7 +374,7 @@ where mut data: &[u8], ) -> ValidationResult { let raw = data; - let action = match GossipMessage::::decode(&mut data) { + let action = match GossipMessage::::decode_all(&mut data) { Ok(GossipMessage::Vote(msg)) => self.validate_vote(msg, sender, raw), Ok(GossipMessage::FinalityProof(proof)) => self.validate_finality_proof(proof, sender), Err(e) => { @@ -402,7 +402,7 @@ where fn message_expired<'a>(&'a self) -> Box bool + 'a> { let filter = self.gossip_filter.read(); - Box::new(move |_topic, mut data| 
match GossipMessage::::decode(&mut data) { + Box::new(move |_topic, mut data| match GossipMessage::::decode_all(&mut data) { Ok(GossipMessage::Vote(msg)) => { let round = msg.commitment.block_number; let set_id = msg.commitment.validator_set_id; @@ -446,7 +446,7 @@ where return do_rebroadcast } - match GossipMessage::::decode(&mut data) { + match GossipMessage::::decode_all(&mut data) { Ok(GossipMessage::Vote(msg)) => { let round = msg.commitment.block_number; let set_id = msg.commitment.validator_set_id; @@ -476,9 +476,10 @@ pub(crate) mod tests { use super::*; use crate::keystore::BeefyKeystore; use sc_network_test::Block; + use sp_application_crypto::key_types::BEEFY as BEEFY_KEY_TYPE; use sp_consensus_beefy::{ - crypto::Signature, known_payloads, Commitment, Keyring, MmrRootHash, Payload, - SignedCommitment, VoteMessage, KEY_TYPE, + ecdsa_crypto::Signature, known_payloads, Commitment, Keyring, MmrRootHash, Payload, + SignedCommitment, VoteMessage, }; use sp_keystore::{testing::MemoryKeystore, Keystore}; @@ -536,7 +537,7 @@ pub(crate) mod tests { pub fn sign_commitment(who: &Keyring, commitment: &Commitment) -> Signature { let store = MemoryKeystore::new(); - store.ecdsa_generate_new(KEY_TYPE, Some(&who.to_seed())).unwrap(); + store.ecdsa_generate_new(BEEFY_KEY_TYPE, Some(&who.to_seed())).unwrap(); let beefy_keystore: BeefyKeystore = Some(store.into()).into(); beefy_keystore.sign(&who.public(), &commitment.encode()).unwrap() } diff --git a/client/consensus/beefy/src/communication/mod.rs b/client/consensus/beefy/src/communication/mod.rs index 0de67f6062339..7f9535bfc23f1 100644 --- a/client/consensus/beefy/src/communication/mod.rs +++ b/client/consensus/beefy/src/communication/mod.rs @@ -117,7 +117,7 @@ mod tests { use beefy_protocol_name::{gossip_protocol_name, justifications_protocol_name}; // Create protocol name using random genesis hash. 
let genesis_hash = H256::random(); - let genesis_hex = array_bytes::bytes2hex("", genesis_hash.as_ref()); + let genesis_hex = array_bytes::bytes2hex("", genesis_hash); let expected_gossip_name = format!("/{}/beefy/2", genesis_hex); let gossip_proto_name = gossip_protocol_name(&genesis_hash, None); diff --git a/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs b/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs index d4f4b59f0195e..8240dd71104c2 100644 --- a/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs +++ b/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -16,12 +16,9 @@ //! Helper for handling (i.e. answering) BEEFY justifications requests from a remote peer. -use codec::Decode; -use futures::{ - channel::{mpsc, oneshot}, - StreamExt, -}; -use log::{debug, trace}; +use codec::DecodeAll; +use futures::{channel::oneshot, StreamExt}; +use log::{debug, error, trace}; use sc_client_api::BlockBackend; use sc_network::{ config as netconfig, config::RequestResponseConfig, types::ProtocolName, PeerId, @@ -80,7 +77,7 @@ impl IncomingRequest { F: FnOnce(usize) -> Vec, { let netconfig::IncomingRequest { payload, peer, pending_response } = raw; - let payload = match JustificationRequest::decode(&mut payload.as_ref()) { + let payload = match JustificationRequest::decode_all(&mut payload.as_ref()) { Ok(payload) => payload, Err(err) => { let response = netconfig::OutgoingResponse { @@ -102,11 +99,11 @@ impl IncomingRequest { /// /// Takes care of decoding and handling of invalid encoded requests. 
pub(crate) struct IncomingRequestReceiver { - raw: mpsc::Receiver, + raw: async_channel::Receiver, } impl IncomingRequestReceiver { - pub fn new(inner: mpsc::Receiver) -> Self { + pub fn new(inner: async_channel::Receiver) -> Self { Self { raw: inner } } @@ -218,5 +215,9 @@ where }, } } + error!( + target: crate::LOG_TARGET, + "🥩 On-demand requests receiver stream terminated, closing worker." + ); } } diff --git a/client/consensus/beefy/src/communication/request_response/mod.rs b/client/consensus/beefy/src/communication/request_response/mod.rs index 545ab18cf1d34..1801512fa5421 100644 --- a/client/consensus/beefy/src/communication/request_response/mod.rs +++ b/client/consensus/beefy/src/communication/request_response/mod.rs @@ -23,7 +23,6 @@ pub(crate) mod outgoing_requests_engine; pub use incoming_requests_handler::BeefyJustifsRequestHandler; -use futures::channel::mpsc; use std::time::Duration; use codec::{Decode, Encode, Error as CodecError}; @@ -54,7 +53,7 @@ pub(crate) fn on_demand_justifications_protocol_config>( ) -> (IncomingRequestReceiver, RequestResponseConfig) { let name = justifications_protocol_name(genesis_hash, fork_id); let fallback_names = vec![]; - let (tx, rx) = mpsc::channel(JUSTIF_CHANNEL_SIZE); + let (tx, rx) = async_channel::bounded(JUSTIF_CHANNEL_SIZE); let rx = IncomingRequestReceiver::new(rx); let cfg = RequestResponseConfig { name, diff --git a/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs b/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs index 10105ff2d417d..ef462a54fca5b 100644 --- a/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs +++ b/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -26,7 +26,7 @@ use sc_network::{ request_responses::{IfDisconnected, RequestFailure}, NetworkRequest, PeerId, ProtocolName, }; -use sp_consensus_beefy::{crypto::AuthorityId, ValidatorSet}; +use 
sp_consensus_beefy::{ecdsa_crypto::AuthorityId, ValidatorSet}; use sp_runtime::traits::{Block, NumberFor}; use std::{collections::VecDeque, result::Result, sync::Arc}; diff --git a/client/consensus/beefy/src/error.rs b/client/consensus/beefy/src/error.rs index 16afbf2185780..08b9960f41a1a 100644 --- a/client/consensus/beefy/src/error.rs +++ b/client/consensus/beefy/src/error.rs @@ -18,7 +18,7 @@ //! BEEFY gadget specific errors //! -//! Used for BEEFY gadget interal error handling only +//! Used for BEEFY gadget internal error handling only use std::fmt::Debug; @@ -34,6 +34,8 @@ pub enum Error { Signature(String), #[error("Session uninitialized")] UninitSession, + #[error("pallet-beefy was reset, please restart voter")] + ConsensusReset, } #[cfg(test)] @@ -45,6 +47,7 @@ impl PartialEq for Error { (Error::RuntimeApi(_), Error::RuntimeApi(_)) => true, (Error::Signature(s1), Error::Signature(s2)) => s1 == s2, (Error::UninitSession, Error::UninitSession) => true, + (Error::ConsensusReset, Error::ConsensusReset) => true, _ => false, } } diff --git a/client/consensus/beefy/src/import.rs b/client/consensus/beefy/src/import.rs index bda8169d95013..5b2abb20acede 100644 --- a/client/consensus/beefy/src/import.rs +++ b/client/consensus/beefy/src/import.rs @@ -20,9 +20,9 @@ use std::sync::Arc; use log::debug; -use sp_api::{ProvideRuntimeApi, TransactionFor}; +use sp_api::ProvideRuntimeApi; use sp_consensus::Error as ConsensusError; -use sp_consensus_beefy::{BeefyApi, BEEFY_ENGINE_ID}; +use sp_consensus_beefy::{ecdsa_crypto::AuthorityId, BeefyApi, BEEFY_ENGINE_ID}; use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, NumberFor}, EncodedJustification, @@ -83,7 +83,7 @@ where Block: BlockT, BE: Backend, Runtime: ProvideRuntimeApi, - Runtime::Api: BeefyApi + Send, + Runtime::Api: BeefyApi + Send, { fn decode_and_verify( &self, @@ -118,21 +118,15 @@ impl BlockImport for BeefyBlockImport, - I: BlockImport< - Block, - Error = ConsensusError, - Transaction = 
sp_api::TransactionFor, - > + Send - + Sync, + I: BlockImport + Send + Sync, Runtime: ProvideRuntimeApi + Send + Sync, - Runtime::Api: BeefyApi, + Runtime::Api: BeefyApi, { type Error = ConsensusError; - type Transaction = TransactionFor; async fn import_block( &mut self, - mut block: BlockImportParams, + mut block: BlockImportParams, ) -> Result { let hash = block.post_hash(); let number = *block.header.number(); diff --git a/client/consensus/beefy/src/justification.rs b/client/consensus/beefy/src/justification.rs index 731acdfa63389..483184e2374a2 100644 --- a/client/consensus/beefy/src/justification.rs +++ b/client/consensus/beefy/src/justification.rs @@ -17,10 +17,10 @@ // along with this program. If not, see . use crate::keystore::BeefyKeystore; -use codec::{Decode, Encode}; +use codec::{DecodeAll, Encode}; use sp_consensus::Error as ConsensusError; use sp_consensus_beefy::{ - crypto::{AuthorityId, Signature}, + ecdsa_crypto::{AuthorityId, Signature}, ValidatorSet, ValidatorSetId, VersionedFinalityProof, }; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -43,7 +43,7 @@ pub(crate) fn decode_and_verify_finality_proof( target_number: NumberFor, validator_set: &ValidatorSet, ) -> Result, (ConsensusError, u32)> { - let proof = >::decode(&mut &*encoded) + let proof = >::decode_all(&mut &*encoded) .map_err(|_| (ConsensusError::InvalidJustification, 0))?; verify_with_validator_set::(target_number, validator_set, &proof).map(|_| proof) } diff --git a/client/consensus/beefy/src/keystore.rs b/client/consensus/beefy/src/keystore.rs index 795d4cc8ade10..925bb08828220 100644 --- a/client/consensus/beefy/src/keystore.rs +++ b/client/consensus/beefy/src/keystore.rs @@ -16,15 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use sp_application_crypto::RuntimeAppPublic; +use sp_application_crypto::{key_types::BEEFY as BEEFY_KEY_TYPE, RuntimeAppPublic}; use sp_core::keccak_256; use sp_keystore::KeystorePtr; use log::warn; use sp_consensus_beefy::{ - crypto::{Public, Signature}, - BeefyAuthorityId, KEY_TYPE, + ecdsa_crypto::{Public, Signature}, + BeefyAuthorityId, }; use crate::{error, LOG_TARGET}; @@ -50,7 +50,7 @@ impl BeefyKeystore { // we do check for multiple private keys as a key store sanity check. let public: Vec = keys .iter() - .filter(|k| store.has_keys(&[(k.to_raw_vec(), KEY_TYPE)])) + .filter(|k| store.has_keys(&[(k.to_raw_vec(), BEEFY_KEY_TYPE)])) .cloned() .collect(); @@ -78,7 +78,7 @@ impl BeefyKeystore { let public = public.as_ref(); let sig = store - .ecdsa_sign_prehashed(KEY_TYPE, public, &msg) + .ecdsa_sign_prehashed(BEEFY_KEY_TYPE, public, &msg) .map_err(|e| error::Error::Keystore(e.to_string()))? .ok_or_else(|| error::Error::Signature("ecdsa_sign_prehashed() failed".to_string()))?; @@ -96,7 +96,7 @@ impl BeefyKeystore { let store = self.0.clone().ok_or_else(|| error::Error::Keystore("no Keystore".into()))?; let pk: Vec = - store.ecdsa_public_keys(KEY_TYPE).drain(..).map(Public::from).collect(); + store.ecdsa_public_keys(BEEFY_KEY_TYPE).drain(..).map(Public::from).collect(); Ok(pk) } @@ -117,7 +117,7 @@ impl From> for BeefyKeystore { #[cfg(test)] pub mod tests { - use sp_consensus_beefy::{crypto, Keyring}; + use sp_consensus_beefy::{ecdsa_crypto, Keyring}; use sp_core::{ecdsa, Pair}; use sp_keystore::testing::MemoryKeystore; @@ -156,35 +156,51 @@ pub mod tests { #[test] fn pair_works() { - let want = crypto::Pair::from_string("//Alice", None).expect("Pair failed").to_raw_vec(); + let want = ecdsa_crypto::Pair::from_string("//Alice", None) + .expect("Pair failed") + .to_raw_vec(); let got = Keyring::Alice.pair().to_raw_vec(); assert_eq!(want, got); - let want = crypto::Pair::from_string("//Bob", None).expect("Pair failed").to_raw_vec(); + let want = 
ecdsa_crypto::Pair::from_string("//Bob", None) + .expect("Pair failed") + .to_raw_vec(); let got = Keyring::Bob.pair().to_raw_vec(); assert_eq!(want, got); - let want = crypto::Pair::from_string("//Charlie", None).expect("Pair failed").to_raw_vec(); + let want = ecdsa_crypto::Pair::from_string("//Charlie", None) + .expect("Pair failed") + .to_raw_vec(); let got = Keyring::Charlie.pair().to_raw_vec(); assert_eq!(want, got); - let want = crypto::Pair::from_string("//Dave", None).expect("Pair failed").to_raw_vec(); + let want = ecdsa_crypto::Pair::from_string("//Dave", None) + .expect("Pair failed") + .to_raw_vec(); let got = Keyring::Dave.pair().to_raw_vec(); assert_eq!(want, got); - let want = crypto::Pair::from_string("//Eve", None).expect("Pair failed").to_raw_vec(); + let want = ecdsa_crypto::Pair::from_string("//Eve", None) + .expect("Pair failed") + .to_raw_vec(); let got = Keyring::Eve.pair().to_raw_vec(); assert_eq!(want, got); - let want = crypto::Pair::from_string("//Ferdie", None).expect("Pair failed").to_raw_vec(); + let want = ecdsa_crypto::Pair::from_string("//Ferdie", None) + .expect("Pair failed") + .to_raw_vec(); let got = Keyring::Ferdie.pair().to_raw_vec(); assert_eq!(want, got); - let want = crypto::Pair::from_string("//One", None).expect("Pair failed").to_raw_vec(); + let want = ecdsa_crypto::Pair::from_string("//One", None) + .expect("Pair failed") + .to_raw_vec(); let got = Keyring::One.pair().to_raw_vec(); assert_eq!(want, got); - let want = crypto::Pair::from_string("//Two", None).expect("Pair failed").to_raw_vec(); + let want = ecdsa_crypto::Pair::from_string("//Two", None) + .expect("Pair failed") + .to_raw_vec(); let got = Keyring::Two.pair().to_raw_vec(); assert_eq!(want, got); } @@ -193,8 +209,8 @@ pub mod tests { fn authority_id_works() { let store = keystore(); - let alice: crypto::Public = store - .ecdsa_generate_new(KEY_TYPE, Some(&Keyring::Alice.to_seed())) + let alice: ecdsa_crypto::Public = store + 
.ecdsa_generate_new(BEEFY_KEY_TYPE, Some(&Keyring::Alice.to_seed())) .ok() .unwrap() .into(); @@ -219,8 +235,8 @@ pub mod tests { fn sign_works() { let store = keystore(); - let alice: crypto::Public = store - .ecdsa_generate_new(KEY_TYPE, Some(&Keyring::Alice.to_seed())) + let alice: ecdsa_crypto::Public = store + .ecdsa_generate_new(BEEFY_KEY_TYPE, Some(&Keyring::Alice.to_seed())) .ok() .unwrap() .into(); @@ -239,7 +255,10 @@ pub mod tests { fn sign_error() { let store = keystore(); - store.ecdsa_generate_new(KEY_TYPE, Some(&Keyring::Bob.to_seed())).ok().unwrap(); + store + .ecdsa_generate_new(BEEFY_KEY_TYPE, Some(&Keyring::Bob.to_seed())) + .ok() + .unwrap(); let store: BeefyKeystore = Some(store).into(); @@ -268,8 +287,8 @@ pub mod tests { fn verify_works() { let store = keystore(); - let alice: crypto::Public = store - .ecdsa_generate_new(KEY_TYPE, Some(&Keyring::Alice.to_seed())) + let alice: ecdsa_crypto::Public = store + .ecdsa_generate_new(BEEFY_KEY_TYPE, Some(&Keyring::Alice.to_seed())) .ok() .unwrap() .into(); @@ -305,11 +324,11 @@ pub mod tests { let _ = add_key(TEST_TYPE, None); // BEEFY keys - let _ = add_key(KEY_TYPE, Some(Keyring::Dave.to_seed().as_str())); - let _ = add_key(KEY_TYPE, Some(Keyring::Eve.to_seed().as_str())); + let _ = add_key(BEEFY_KEY_TYPE, Some(Keyring::Dave.to_seed().as_str())); + let _ = add_key(BEEFY_KEY_TYPE, Some(Keyring::Eve.to_seed().as_str())); - let key1: crypto::Public = add_key(KEY_TYPE, None).into(); - let key2: crypto::Public = add_key(KEY_TYPE, None).into(); + let key1: ecdsa_crypto::Public = add_key(BEEFY_KEY_TYPE, None).into(); + let key2: ecdsa_crypto::Public = add_key(BEEFY_KEY_TYPE, None).into(); let store: BeefyKeystore = Some(store).into(); diff --git a/client/consensus/beefy/src/lib.rs b/client/consensus/beefy/src/lib.rs index d3e5e4bc68936..da339dae7e1f5 100644 --- a/client/consensus/beefy/src/lib.rs +++ b/client/consensus/beefy/src/lib.rs @@ -46,8 +46,8 @@ use sp_blockchain::{ }; use sp_consensus::{Error as 
ConsensusError, SyncOracle}; use sp_consensus_beefy::{ - crypto::AuthorityId, BeefyApi, MmrRootHash, PayloadProvider, ValidatorSet, BEEFY_ENGINE_ID, - GENESIS_AUTHORITY_SET_ID, + ecdsa_crypto::AuthorityId, BeefyApi, MmrRootHash, PayloadProvider, ValidatorSet, + BEEFY_ENGINE_ID, }; use sp_keystore::KeystorePtr; use sp_mmr_primitives::MmrApi; @@ -139,11 +139,9 @@ pub fn beefy_block_import_and_links( where B: Block, BE: Backend, - I: BlockImport> - + Send - + Sync, + I: BlockImport + Send + Sync, RuntimeApi: ProvideRuntimeApi + Send + Sync, - RuntimeApi::Api: BeefyApi, + RuntimeApi::Api: BeefyApi, { // Voter -> RPC links let (to_rpc_justif_sender, from_voter_justif_stream) = @@ -225,7 +223,7 @@ pub async fn start_beefy_gadget( C: Client + BlockBackend, P: PayloadProvider, R: ProvideRuntimeApi, - R::Api: BeefyApi + MmrApi>, + R::Api: BeefyApi + MmrApi>, N: GossipNetwork + NetworkRequest + Send + Sync + 'static, S: GossipSyncing + SyncOracle + 'static, { @@ -282,8 +280,14 @@ pub async fn start_beefy_gadget( let persisted_state = match wait_for_runtime_pallet(&*runtime, &mut gossip_engine, &mut finality_notifications) .await - .and_then(|best_grandpa| { - load_or_init_voter_state(&*backend, &*runtime, best_grandpa, min_block_delta) + .and_then(|(beefy_genesis, best_grandpa)| { + load_or_init_voter_state( + &*backend, + &*runtime, + beefy_genesis, + best_grandpa, + min_block_delta, + ) }) { Ok(state) => state, Err(e) => { @@ -316,9 +320,9 @@ pub async fn start_beefy_gadget( persisted_state, }; - futures::future::join( - worker.run(block_import_justif, finality_notifications), - on_demand_justifications_handler.run(), + futures::future::select( + Box::pin(worker.run(block_import_justif, finality_notifications)), + Box::pin(on_demand_justifications_handler.run()), ) .await; } @@ -326,6 +330,7 @@ pub async fn start_beefy_gadget( fn load_or_init_voter_state( backend: &BE, runtime: &R, + beefy_genesis: NumberFor, best_grandpa: ::Header, min_block_delta: u32, ) -> ClientResult> 
@@ -333,19 +338,24 @@ where B: Block, BE: Backend, R: ProvideRuntimeApi, - R::Api: BeefyApi, + R::Api: BeefyApi, { - // Initialize voter state from AUX DB or from pallet genesis. - if let Some(mut state) = crate::aux_schema::load_persistent(backend)? { - // Overwrite persisted state with current best GRANDPA block. - state.set_best_grandpa(best_grandpa); - // Overwrite persisted data with newly provided `min_block_delta`. - state.set_min_block_delta(min_block_delta); - info!(target: LOG_TARGET, "🥩 Loading BEEFY voter state from db: {:?}.", state); - Ok(state) - } else { - initialize_voter_state(backend, runtime, best_grandpa, min_block_delta) - } + // Initialize voter state from AUX DB if compatible. + crate::aux_schema::load_persistent(backend)? + // Verify state pallet genesis matches runtime. + .filter(|state| state.pallet_genesis() == beefy_genesis) + .and_then(|mut state| { + // Overwrite persisted state with current best GRANDPA block. + state.set_best_grandpa(best_grandpa.clone()); + // Overwrite persisted data with newly provided `min_block_delta`. + state.set_min_block_delta(min_block_delta); + info!(target: LOG_TARGET, "🥩 Loading BEEFY voter state from db: {:?}.", state); + Some(Ok(state)) + }) + // No valid voter-state persisted, re-initialize from pallet genesis. 
+ .unwrap_or_else(|| { + initialize_voter_state(backend, runtime, beefy_genesis, best_grandpa, min_block_delta) + }) } // If no persisted state present, walk back the chain from first GRANDPA notification to either: @@ -355,6 +365,7 @@ where fn initialize_voter_state( backend: &BE, runtime: &R, + beefy_genesis: NumberFor, best_grandpa: ::Header, min_block_delta: u32, ) -> ClientResult> @@ -362,13 +373,14 @@ where B: Block, BE: Backend, R: ProvideRuntimeApi, - R::Api: BeefyApi, + R::Api: BeefyApi, { let beefy_genesis = runtime .runtime_api() .beefy_genesis(best_grandpa.hash()) .ok() .flatten() + .filter(|genesis| *genesis == beefy_genesis) .ok_or_else(|| ClientError::Backend("BEEFY pallet expected to be active.".into()))?; // Walk back the imported blocks and initialize voter either, at the last block with // a BEEFY justification, or at pallet genesis block; voter will resume from there. @@ -390,22 +402,26 @@ where let best_beefy = *header.number(); // If no session boundaries detected so far, just initialize new rounds here. if sessions.is_empty() { - let active_set = expect_validator_set(runtime, header.hash())?; + let active_set = expect_validator_set(runtime, backend, &header, beefy_genesis)?; let mut rounds = Rounds::new(best_beefy, active_set); // Mark the round as already finalized. rounds.conclude(best_beefy); sessions.push_front(rounds); } - let state = - PersistedState::checked_new(best_grandpa, best_beefy, sessions, min_block_delta) - .ok_or_else(|| ClientError::Backend("Invalid BEEFY chain".into()))?; + let state = PersistedState::checked_new( + best_grandpa, + best_beefy, + sessions, + min_block_delta, + beefy_genesis, + ) + .ok_or_else(|| ClientError::Backend("Invalid BEEFY chain".into()))?; break state } if *header.number() == beefy_genesis { // We've reached BEEFY genesis, initialize voter here. 
- let genesis_set = - expect_validator_set(runtime, header.hash()).and_then(genesis_set_sanity_check)?; + let genesis_set = expect_validator_set(runtime, backend, &header, beefy_genesis)?; info!( target: LOG_TARGET, "🥩 Loading BEEFY voter state from genesis on what appears to be first startup. \ @@ -415,8 +431,14 @@ where ); sessions.push_front(Rounds::new(beefy_genesis, genesis_set)); - break PersistedState::checked_new(best_grandpa, Zero::zero(), sessions, min_block_delta) - .ok_or_else(|| ClientError::Backend("Invalid BEEFY chain".into()))? + break PersistedState::checked_new( + best_grandpa, + Zero::zero(), + sessions, + min_block_delta, + beefy_genesis, + ) + .ok_or_else(|| ClientError::Backend("Invalid BEEFY chain".into()))? } if let Some(active) = worker::find_authorities_change::(&header) { @@ -428,16 +450,8 @@ where sessions.push_front(Rounds::new(*header.number(), active)); } - // Check if state is still available if we move up the chain. - let parent_hash = *header.parent_hash(); - runtime.runtime_api().validator_set(parent_hash).ok().flatten().ok_or_else(|| { - let msg = format!("{}. Could not initialize BEEFY voter.", parent_hash); - error!(target: LOG_TARGET, "🥩 {}", msg); - ClientError::Consensus(sp_consensus::Error::StateUnavailable(msg)) - })?; - // Move up the chain. 
- header = blockchain.expect_header(parent_hash)?; + header = blockchain.expect_header(*header.parent_hash())?; }; aux_schema::write_current_version(backend)?; @@ -451,11 +465,11 @@ async fn wait_for_runtime_pallet( runtime: &R, mut gossip_engine: &mut GossipEngine, finality: &mut Fuse>, -) -> ClientResult<::Header> +) -> ClientResult<(NumberFor, ::Header)> where B: Block, R: ProvideRuntimeApi, - R::Api: BeefyApi, + R::Api: BeefyApi, { info!(target: LOG_TARGET, "🥩 BEEFY gadget waiting for BEEFY pallet to become available..."); loop { @@ -474,7 +488,7 @@ where "🥩 BEEFY pallet available: block {:?} beefy genesis {:?}", notif.header.number(), start ); - return Ok(notif.header) + return Ok((start, notif.header)) } } }, @@ -488,30 +502,36 @@ where Err(ClientError::Backend(err_msg)) } -fn genesis_set_sanity_check( - active: ValidatorSet, -) -> ClientResult> { - if active.id() == GENESIS_AUTHORITY_SET_ID { - Ok(active) - } else { - error!(target: LOG_TARGET, "🥩 Unexpected ID for genesis validator set {:?}.", active); - Err(ClientError::Backend("BEEFY Genesis sanity check failed.".into())) - } -} - -fn expect_validator_set( +fn expect_validator_set( runtime: &R, - at_hash: B::Hash, + backend: &BE, + at_header: &B::Header, + beefy_genesis: NumberFor, ) -> ClientResult> where B: Block, + BE: Backend, R: ProvideRuntimeApi, - R::Api: BeefyApi, + R::Api: BeefyApi, { runtime .runtime_api() - .validator_set(at_hash) + .validator_set(at_header.hash()) .ok() .flatten() - .ok_or_else(|| ClientError::Backend("BEEFY pallet expected to be active.".into())) + .or_else(|| { + // if state unavailable, fallback to walking up the chain looking for the header + // Digest emitted when validator set active 'at_header' was enacted. + let blockchain = backend.blockchain(); + let mut header = at_header.clone(); + while *header.number() >= beefy_genesis { + match worker::find_authorities_change::(&header) { + Some(active) => return Some(active), + // Move up the chain. 
+ None => header = blockchain.expect_header(*header.parent_hash()).ok()?, + } + } + None + }) + .ok_or_else(|| ClientError::Backend("Could not find initial validator set".into())) } diff --git a/client/consensus/beefy/src/round.rs b/client/consensus/beefy/src/round.rs index d8948ff98c552..6f400ce47843c 100644 --- a/client/consensus/beefy/src/round.rs +++ b/client/consensus/beefy/src/round.rs @@ -21,7 +21,7 @@ use crate::LOG_TARGET; use codec::{Decode, Encode}; use log::debug; use sp_consensus_beefy::{ - crypto::{AuthorityId, Signature}, + ecdsa_crypto::{AuthorityId, Signature}, Commitment, EquivocationProof, SignedCommitment, ValidatorSet, ValidatorSetId, VoteMessage, }; use sp_runtime::traits::{Block, NumberFor}; diff --git a/client/consensus/beefy/src/tests.rs b/client/consensus/beefy/src/tests.rs index 288a9fde5b817..3bb65e9d57f43 100644 --- a/client/consensus/beefy/src/tests.rs +++ b/client/consensus/beefy/src/tests.rs @@ -48,14 +48,15 @@ use sc_network_test::{ use sc_utils::notification::NotificationReceiver; use serde::{Deserialize, Serialize}; use sp_api::{ApiRef, ProvideRuntimeApi}; +use sp_application_crypto::key_types::BEEFY as BEEFY_KEY_TYPE; use sp_consensus::BlockOrigin; use sp_consensus_beefy::{ - crypto::{AuthorityId, Signature}, + ecdsa_crypto::{AuthorityId, Signature}, known_payloads, mmr::{find_mmr_root_digest, MmrRootProvider}, BeefyApi, Commitment, ConsensusLog, EquivocationProof, Keyring as BeefyKeyring, MmrRootHash, OpaqueKeyOwnershipProof, Payload, SignedCommitment, ValidatorSet, ValidatorSetId, - VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, KEY_TYPE as BeefyKeyType, + VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, }; use sp_core::H256; use sp_keystore::{testing::MemoryKeystore, Keystore, KeystorePtr}; @@ -82,7 +83,7 @@ type BeefyBlockImport = crate::BeefyBlockImport< Block, substrate_test_runtime_client::Backend, TestApi, - BlockImportAdapter>, + BlockImportAdapter, >; pub(crate) type BeefyValidatorSet = ValidatorSet; @@ 
-293,7 +294,7 @@ impl ProvideRuntimeApi for TestApi { } } sp_api::mock_impl_runtime_apis! { - impl BeefyApi for RuntimeApi { + impl BeefyApi for RuntimeApi { fn beefy_genesis() -> Option> { Some(self.inner.beefy_genesis) } @@ -352,7 +353,7 @@ pub(crate) fn make_beefy_ids(keys: &[BeefyKeyring]) -> Vec { pub(crate) fn create_beefy_keystore(authority: BeefyKeyring) -> KeystorePtr { let keystore = MemoryKeystore::new(); keystore - .ecdsa_generate_new(BeefyKeyType, Some(&authority.to_seed())) + .ecdsa_generate_new(BEEFY_KEY_TYPE, Some(&authority.to_seed())) .expect("Creates authority key"); keystore.into() } @@ -373,8 +374,9 @@ async fn voter_init_setup( gossip_validator, None, ); - let best_grandpa = wait_for_runtime_pallet(api, &mut gossip_engine, finality).await.unwrap(); - load_or_init_voter_state(&*backend, api, best_grandpa, 1) + let (beefy_genesis, best_grandpa) = + wait_for_runtime_pallet(api, &mut gossip_engine, finality).await.unwrap(); + load_or_init_voter_state(&*backend, api, beefy_genesis, best_grandpa, 1) } // Spawns beefy voters. Returns a future to spawn on the runtime. 
@@ -385,7 +387,7 @@ fn initialize_beefy( ) -> impl Future where API: ProvideRuntimeApi + Sync + Send, - API::Api: BeefyApi + MmrApi>, + API::Api: BeefyApi + MmrApi>, { let tasks = FuturesUnordered::new(); @@ -981,9 +983,7 @@ async fn should_initialize_voter_at_genesis() { // push 15 blocks with `AuthorityChange` digests every 10 blocks let hashes = net.generate_blocks_and_sync(15, 10, &validator_set, false).await; - let mut finality = net.peer(0).client().as_client().finality_notification_stream().fuse(); - // finalize 13 without justifications net.peer(0).client().as_client().finalize_block(hashes[13], None).unwrap(); @@ -1022,11 +1022,9 @@ async fn should_initialize_voter_at_custom_genesis() { let custom_pallet_genesis = 7; let api = TestApi::new(custom_pallet_genesis, &validator_set, GOOD_MMR_ROOT); - // push 15 blocks with `AuthorityChange` digests every 10 blocks - let hashes = net.generate_blocks_and_sync(15, 10, &validator_set, false).await; - + // push 15 blocks with `AuthorityChange` digests every 15 blocks + let hashes = net.generate_blocks_and_sync(15, 15, &validator_set, false).await; let mut finality = net.peer(0).client().as_client().finality_notification_stream().fuse(); - // finalize 3, 5, 8 without justifications net.peer(0).client().as_client().finalize_block(hashes[3], None).unwrap(); net.peer(0).client().as_client().finalize_block(hashes[5], None).unwrap(); @@ -1053,6 +1051,35 @@ async fn should_initialize_voter_at_custom_genesis() { assert!(verify_persisted_version(&*backend)); let state = load_persistent(&*backend).unwrap().unwrap(); assert_eq!(state, persisted_state); + + // now re-init after genesis changes + + // should ignore existing aux db state and reinit at new genesis + let new_validator_set = ValidatorSet::new(make_beefy_ids(keys), 42).unwrap(); + let new_pallet_genesis = 10; + let api = TestApi::new(new_pallet_genesis, &new_validator_set, GOOD_MMR_ROOT); + + net.peer(0).client().as_client().finalize_block(hashes[10], None).unwrap(); 
+ // load persistent state - state preset in DB, but with different pallet genesis + let new_persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); + + // verify voter initialized with single session starting at block `new_pallet_genesis` (10) + let sessions = new_persisted_state.voting_oracle().sessions(); + assert_eq!(sessions.len(), 1); + assert_eq!(sessions[0].session_start(), new_pallet_genesis); + let rounds = new_persisted_state.active_round().unwrap(); + assert_eq!(rounds.session_start(), new_pallet_genesis); + assert_eq!(rounds.validator_set_id(), new_validator_set.id()); + + // verify next vote target is mandatory block 10 + assert_eq!(new_persisted_state.best_beefy_block(), 0); + assert_eq!(new_persisted_state.best_grandpa_number(), 10); + assert_eq!(new_persisted_state.voting_oracle().voting_target(), Some(new_pallet_genesis)); + + // verify state also saved to db + assert!(verify_persisted_version(&*backend)); + let state = load_persistent(&*backend).unwrap().unwrap(); + assert_eq!(state, new_persisted_state); } #[tokio::test] @@ -1166,7 +1193,7 @@ async fn beefy_finalizing_after_pallet_genesis() { sp_tracing::try_init_simple(); let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); + let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 14).unwrap(); let session_len = 10; let min_block_delta = 1; let pallet_genesis = 15; @@ -1237,18 +1264,34 @@ async fn beefy_reports_equivocations() { let peers = peers.into_iter().enumerate(); // finalize block #1 -> BEEFY should not finalize anything (each node votes on different MMR). 
- finalize_block_and_wait_for_beefy(&net, peers, &hashes[1], &[]).await; + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); + peers.clone().for_each(|(index, _)| { + let client = net.lock().peer(index).client().as_client(); + client.finalize_block(hashes[1], None).unwrap(); + }); - // Verify neither Bob or Bob_Prime report themselves as equivocating. - assert!(api_bob.reported_equivocations.as_ref().unwrap().lock().is_empty()); - assert!(api_bob_prime.reported_equivocations.as_ref().unwrap().lock().is_empty()); + // run for up to 5 seconds waiting for Alice's report of Bob/Bob_Prime equivocation. + for wait_ms in [250, 500, 1250, 3000] { + run_for(Duration::from_millis(wait_ms), &net).await; + if !api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty() { + break + } + } - // Verify Alice reports Bob/Bob_Prime equivocation. + // Verify expected equivocation let alice_reported_equivocations = api_alice.reported_equivocations.as_ref().unwrap().lock(); assert_eq!(alice_reported_equivocations.len(), 1); let equivocation_proof = alice_reported_equivocations.get(0).unwrap(); assert_eq!(equivocation_proof.first.id, BeefyKeyring::Bob.public()); assert_eq!(equivocation_proof.first.commitment.block_number, 1); + + // Verify neither Bob or Bob_Prime report themselves as equivocating. 
+ assert!(api_bob.reported_equivocations.as_ref().unwrap().lock().is_empty()); + assert!(api_bob_prime.reported_equivocations.as_ref().unwrap().lock().is_empty()); + + // sanity verify no new blocks have been finalized by BEEFY + streams_empty_after_timeout(best_blocks, &net, None).await; + streams_empty_after_timeout(versioned_finality_proof, &net, None).await; } #[tokio::test] diff --git a/client/consensus/beefy/src/worker.rs b/client/consensus/beefy/src/worker.rs index c05de197d58fd..17a8891b06142 100644 --- a/client/consensus/beefy/src/worker.rs +++ b/client/consensus/beefy/src/worker.rs @@ -30,7 +30,7 @@ use crate::{ round::{Rounds, VoteImportResult}, BeefyVoterLinks, LOG_TARGET, }; -use codec::{Codec, Decode, Encode}; +use codec::{Codec, Decode, DecodeAll, Encode}; use futures::{stream::Fuse, FutureExt, StreamExt}; use log::{debug, error, info, log_enabled, trace, warn}; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; @@ -41,7 +41,7 @@ use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; use sp_consensus::SyncOracle; use sp_consensus_beefy::{ check_equivocation_proof, - crypto::{AuthorityId, Signature}, + ecdsa_crypto::{AuthorityId, Signature}, BeefyApi, Commitment, ConsensusLog, EquivocationProof, PayloadProvider, ValidatorSet, VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, }; @@ -69,17 +69,20 @@ pub(crate) enum RoundAction { /// Responsible for the voting strategy. /// It chooses which incoming votes to accept and which votes to generate. /// Keeps track of voting seen for current and future rounds. +/// +/// Note: this is part of `PersistedState` so any changes here should also bump +/// aux-db schema version. #[derive(Debug, Decode, Encode, PartialEq)] pub(crate) struct VoterOracle { /// Queue of known sessions. Keeps track of voting rounds (block numbers) within each session. /// /// There are three voter states coresponding to three queue states: /// 1. voter uninitialized: queue empty, - /// 2. 
up-to-date - all mandatory blocks leading up to current GRANDPA finalized: - /// queue has ONE element, the 'current session' where `mandatory_done == true`, + /// 2. up-to-date - all mandatory blocks leading up to current GRANDPA finalized: queue has ONE + /// element, the 'current session' where `mandatory_done == true`, /// 3. lagging behind GRANDPA: queue has [1, N] elements, where all `mandatory_done == false`. - /// In this state, everytime a session gets its mandatory block BEEFY finalized, it's - /// popped off the queue, eventually getting to state `2. up-to-date`. + /// In this state, everytime a session gets its mandatory block BEEFY finalized, it's popped + /// off the queue, eventually getting to state `2. up-to-date`. sessions: VecDeque>, /// Min delta in block numbers between two blocks, BEEFY should vote on. min_block_delta: u32, @@ -256,6 +259,9 @@ impl VoterOracle { } } +/// BEEFY voter state persisted in aux DB. +/// +/// Note: Any changes here should also bump aux-db schema version. #[derive(Debug, Decode, Encode, PartialEq)] pub(crate) struct PersistedState { /// Best block we voted on. @@ -263,6 +269,8 @@ pub(crate) struct PersistedState { /// Chooses which incoming votes to accept and which votes to generate. /// Keeps track of voting seen for current and future rounds. voting_oracle: VoterOracle, + /// Pallet-beefy genesis block - block number when BEEFY consensus started for this chain. 
+ pallet_genesis: NumberFor, } impl PersistedState { @@ -271,9 +279,19 @@ impl PersistedState { best_beefy: NumberFor, sessions: VecDeque>, min_block_delta: u32, + pallet_genesis: NumberFor, ) -> Option { - VoterOracle::checked_new(sessions, min_block_delta, grandpa_header, best_beefy) - .map(|voting_oracle| PersistedState { best_voted: Zero::zero(), voting_oracle }) + VoterOracle::checked_new(sessions, min_block_delta, grandpa_header, best_beefy).map( + |voting_oracle| PersistedState { + best_voted: Zero::zero(), + voting_oracle, + pallet_genesis, + }, + ) + } + + pub fn pallet_genesis(&self) -> NumberFor { + self.pallet_genesis } pub(crate) fn set_min_block_delta(&mut self, min_block_delta: u32) { @@ -330,7 +348,7 @@ where P: PayloadProvider, S: SyncOracle, R: ProvideRuntimeApi, - R::Api: BeefyApi, + R::Api: BeefyApi, { fn best_grandpa_block(&self) -> NumberFor { *self.persisted_state.voting_oracle.best_grandpa_block_header.number() @@ -411,7 +429,10 @@ where ); } - fn handle_finality_notification(&mut self, notification: &FinalityNotification) { + fn handle_finality_notification( + &mut self, + notification: &FinalityNotification, + ) -> Result<(), Error> { debug!( target: LOG_TARGET, "🥩 Finality notification: header {:?} tree_route {:?}", @@ -420,6 +441,18 @@ where ); let header = ¬ification.header; + self.runtime + .runtime_api() + .beefy_genesis(header.hash()) + .ok() + .flatten() + .filter(|genesis| *genesis == self.persisted_state.pallet_genesis) + .ok_or_else(|| { + let err = Error::ConsensusReset; + error!(target: LOG_TARGET, "🥩 Error: {}", err); + err + })?; + if *header.number() > self.best_grandpa_block() { // update best GRANDPA finalized block we have seen self.persisted_state.set_best_grandpa(header.clone()); @@ -451,6 +484,8 @@ where error!(target: LOG_TARGET, "🥩 Voter error: {:?}", e); } } + + Ok(()) } /// Based on [VoterOracle] this vote is either processed here or discarded. 
@@ -775,7 +810,7 @@ where self.gossip_engine .messages_for(votes_topic::()) .filter_map(|notification| async move { - let vote = GossipMessage::::decode(&mut ¬ification.message[..]) + let vote = GossipMessage::::decode_all(&mut ¬ification.message[..]) .ok() .and_then(|message| message.unwrap_vote()); trace!(target: LOG_TARGET, "🥩 Got vote message: {:?}", vote); @@ -787,7 +822,7 @@ where self.gossip_engine .messages_for(proofs_topic::()) .filter_map(|notification| async move { - let proof = GossipMessage::::decode(&mut ¬ification.message[..]) + let proof = GossipMessage::::decode_all(&mut ¬ification.message[..]) .ok() .and_then(|message| message.unwrap_finality_proof()); trace!(target: LOG_TARGET, "🥩 Got gossip proof message: {:?}", proof); @@ -813,9 +848,9 @@ where // Use `select_biased!` to prioritize order below. // Process finality notifications first since these drive the voter. notification = finality_notifications.next() => { - if let Some(notification) = notification { - self.handle_finality_notification(¬ification); - } else { + if notification.and_then(|notif| { + self.handle_finality_notification(¬if).ok() + }).is_none() { error!(target: LOG_TARGET, "🥩 Finality stream terminated, closing worker."); return; } @@ -1086,6 +1121,7 @@ pub(crate) mod tests { }; let backend = peer.client().as_backend(); + let beefy_genesis = 1; let api = Arc::new(TestApi::with_validator_set(&genesis_validator_set)); let network = peer.network_service().clone(); let sync = peer.sync_service().clone(); @@ -1118,6 +1154,7 @@ pub(crate) mod tests { Zero::zero(), vec![Rounds::new(One::one(), genesis_validator_set)].into(), min_block_delta, + beefy_genesis, ) .unwrap(); let payload_provider = MmrRootProvider::new(api.clone()); diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index e953d67965627..adb6f0920beef 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -28,9 +28,9 @@ sc-utils = { version = "4.0.0-dev", 
path = "../../utils" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.13.0", path = "../../../primitives/state-machine" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.28.0", path = "../../../primitives/state-machine" } [dev-dependencies] sp-test-primitives = { version = "2.0.0", path = "../../../primitives/test-primitives" } diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs index 70bf0283af2d9..a451692ad478e 100644 --- a/client/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -20,7 +20,7 @@ use serde::{Deserialize, Serialize}; use sp_runtime::{ - traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor}, + traits::{Block as BlockT, HashingFor, Header as HeaderT, NumberFor}, DigestItem, Justification, Justifications, }; use std::{any::Any, borrow::Cow, collections::HashMap, sync::Arc}; @@ -119,9 +119,9 @@ pub struct BlockCheckParams { } /// Precomputed storage. -pub enum StorageChanges { +pub enum StorageChanges { /// Changes coming from block execution. - Changes(sp_state_machine::StorageChanges>), + Changes(sp_state_machine::StorageChanges>), /// Whole new state. Import(ImportedState), } @@ -142,9 +142,9 @@ impl std::fmt::Debug for ImportedState { } /// Defines how a new state is computed for a given imported block. -pub enum StateAction { +pub enum StateAction { /// Apply precomputed changes coming from block execution or state sync. 
- ApplyChanges(StorageChanges), + ApplyChanges(StorageChanges), /// Execute block body (required) and compute state. Execute, /// Execute block body if parent state is available and compute state. @@ -153,7 +153,7 @@ pub enum StateAction { Skip, } -impl StateAction { +impl StateAction { /// Check if execution checks that require runtime calls should be skipped. pub fn skip_execution_checks(&self) -> bool { match self { @@ -167,7 +167,7 @@ impl StateAction { /// Data required to import a Block. #[non_exhaustive] -pub struct BlockImportParams { +pub struct BlockImportParams { /// Origin of the Block pub origin: BlockOrigin, /// The header, without consensus post-digests applied. This should be in the same @@ -192,7 +192,7 @@ pub struct BlockImportParams { /// Indexed transaction body of the block. pub indexed_body: Option>>, /// Specify how the new state is computed. - pub state_action: StateAction, + pub state_action: StateAction, /// Is this block finalized already? /// `true` implies instant finality. pub finalized: bool, @@ -218,7 +218,7 @@ pub struct BlockImportParams { pub post_hash: Option, } -impl BlockImportParams { +impl BlockImportParams { /// Create a new block import params. pub fn new(origin: BlockOrigin, header: Block::Header) -> Self { Self { @@ -261,39 +261,6 @@ impl BlockImportParams { } } - /// Auxiliary function for "converting" the transaction type. - /// - /// Actually this just sets `StorageChanges::Changes` to `None` and makes rustc think that - /// `Self` now uses a different transaction type. - pub fn clear_storage_changes_and_mutate( - self, - ) -> BlockImportParams { - // Preserve imported state. 
- let state_action = match self.state_action { - StateAction::ApplyChanges(StorageChanges::Import(state)) => - StateAction::ApplyChanges(StorageChanges::Import(state)), - StateAction::ApplyChanges(StorageChanges::Changes(_)) => StateAction::Skip, - StateAction::Execute => StateAction::Execute, - StateAction::ExecuteIfPossible => StateAction::ExecuteIfPossible, - StateAction::Skip => StateAction::Skip, - }; - BlockImportParams { - origin: self.origin, - header: self.header, - justifications: self.justifications, - post_digests: self.post_digests, - body: self.body, - indexed_body: self.indexed_body, - state_action, - finalized: self.finalized, - auxiliary: self.auxiliary, - intermediates: self.intermediates, - fork_choice: self.fork_choice, - import_existing: self.import_existing, - post_hash: self.post_hash, - } - } - /// Insert intermediate by given key. pub fn insert_intermediate(&mut self, key: &'static [u8], value: T) { self.intermediates.insert(Cow::from(key), Box::new(value)); @@ -338,8 +305,6 @@ impl BlockImportParams { pub trait BlockImport { /// The error type. type Error: std::error::Error + Send + 'static; - /// The transaction type used by the backend. - type Transaction: Send + 'static; /// Check block preconditions. async fn check_block( @@ -350,17 +315,13 @@ pub trait BlockImport { /// Import a block. async fn import_block( &mut self, - block: BlockImportParams, + block: BlockImportParams, ) -> Result; } #[async_trait::async_trait] -impl BlockImport for crate::import_queue::BoxBlockImport -where - Transaction: Send + 'static, -{ +impl BlockImport for crate::import_queue::BoxBlockImport { type Error = sp_consensus::error::Error; - type Transaction = Transaction; /// Check block preconditions. async fn check_block( @@ -373,21 +334,19 @@ where /// Import a block. 
async fn import_block( &mut self, - block: BlockImportParams, + block: BlockImportParams, ) -> Result { (**self).import_block(block).await } } #[async_trait::async_trait] -impl BlockImport for Arc +impl BlockImport for Arc where - for<'r> &'r T: BlockImport, + for<'r> &'r T: BlockImport, T: Send + Sync, - Transaction: Send + 'static, { type Error = E; - type Transaction = Transaction; async fn check_block( &mut self, @@ -398,7 +357,7 @@ where async fn import_block( &mut self, - block: BlockImportParams, + block: BlockImportParams, ) -> Result { (&**self).import_block(block).await } diff --git a/client/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs index 11ebbd4036a20..39d5bf8ed35d1 100644 --- a/client/consensus/common/src/import_queue.rs +++ b/client/consensus/common/src/import_queue.rs @@ -50,16 +50,14 @@ const LOG_TARGET: &str = "sync::import-queue"; /// A commonly-used Import Queue type. /// /// This defines the transaction type of the `BasicQueue` to be the transaction type for a client. -pub type DefaultImportQueue = - BasicQueue>; +pub type DefaultImportQueue = BasicQueue; mod basic_queue; pub mod buffered_link; pub mod mock; /// Shared block import struct used by the queue. -pub type BoxBlockImport = - Box + Send + Sync>; +pub type BoxBlockImport = Box + Send + Sync>; /// Shared justification import struct used by the queue. pub type BoxJustificationImport = @@ -95,13 +93,11 @@ pub struct IncomingBlock { /// Verify a justification of a block #[async_trait::async_trait] -pub trait Verifier: Send + Sync { +pub trait Verifier: Send { /// Verify the given block data and return the `BlockImportParams` to /// continue the block import process. - async fn verify( - &mut self, - block: BlockImportParams, - ) -> Result, String>; + async fn verify(&mut self, block: BlockImportParams) + -> Result, String>; } /// Blocks import queue API. 
@@ -221,8 +217,8 @@ pub enum BlockImportError { type BlockImportResult = Result>, BlockImportError>; /// Single block import function. -pub async fn import_single_block, Transaction: Send + 'static>( - import_handle: &mut impl BlockImport, +pub async fn import_single_block>( + import_handle: &mut impl BlockImport, block_origin: BlockOrigin, block: IncomingBlock, verifier: &mut V, @@ -231,12 +227,8 @@ pub async fn import_single_block, Transaction: Send + } /// Single block import function with metering. -pub(crate) async fn import_single_block_metered< - B: BlockT, - V: Verifier, - Transaction: Send + 'static, ->( - import_handle: &mut impl BlockImport, +pub(crate) async fn import_single_block_metered>( + import_handle: &mut impl BlockImport, block_origin: BlockOrigin, block: IncomingBlock, verifier: &mut V, @@ -350,7 +342,6 @@ pub(crate) async fn import_single_block_metered< metrics.report_verification(true, started.elapsed()); } - let import_block = import_block.clear_storage_changes_and_mutate(); let imported = import_handle.import_block(import_block).await; if let Some(metrics) = metrics.as_ref() { metrics.report_verification_and_import(started.elapsed()); diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs index b93913703d39f..1cc7ec26fd193 100644 --- a/client/consensus/common/src/import_queue/basic_queue.rs +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -28,7 +28,7 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, NumberFor}, Justification, Justifications, }; -use std::{marker::PhantomData, pin::Pin, time::Duration}; +use std::{pin::Pin, time::Duration}; use crate::{ import_queue::{ @@ -42,15 +42,14 @@ use crate::{ /// Interface to a basic block import queue that is importing blocks sequentially in a separate /// task, with plugable verification. 
-pub struct BasicQueue { +pub struct BasicQueue { /// Handle for sending justification and block import messages to the background task. handle: BasicQueueHandle, /// Results coming from the worker task. result_port: BufferedLinkReceiver, - _phantom: PhantomData, } -impl Drop for BasicQueue { +impl Drop for BasicQueue { fn drop(&mut self) { // Flush the queue and close the receiver to terminate the future. self.handle.close(); @@ -58,13 +57,13 @@ impl Drop for BasicQueue { } } -impl BasicQueue { +impl BasicQueue { /// Instantiate a new basic queue, with given verifier. /// /// This creates a background task, and calls `on_start` on the justification importer. pub fn new>( verifier: V, - block_import: BoxBlockImport, + block_import: BoxBlockImport, justification_import: Option>, spawner: &impl sp_core::traits::SpawnEssentialNamed, prometheus_registry: Option<&Registry>, @@ -96,7 +95,6 @@ impl BasicQueue { Self { handle: BasicQueueHandle::new(justification_sender, block_import_sender), result_port, - _phantom: PhantomData, } } } @@ -165,7 +163,7 @@ impl ImportQueueService for BasicQueueHandle { } #[async_trait::async_trait] -impl ImportQueue for BasicQueue { +impl ImportQueue for BasicQueue { /// Get handle to [`ImportQueueService`]. fn service(&self) -> Box> { Box::new(self.handle.clone()) @@ -220,8 +218,8 @@ mod worker_messages { /// to give other futures the possibility to be run. /// /// Returns when `block_import` ended. 
-async fn block_import_process( - mut block_import: BoxBlockImport, +async fn block_import_process( + mut block_import: BoxBlockImport, mut verifier: impl Verifier, mut result_sender: BufferedLinkSender, mut block_import_receiver: TracingUnboundedReceiver>, @@ -262,10 +260,10 @@ struct BlockImportWorker { } impl BlockImportWorker { - fn new, Transaction: Send + 'static>( + fn new>( result_sender: BufferedLinkSender, verifier: V, - block_import: BoxBlockImport, + block_import: BoxBlockImport, justification_import: Option>, metrics: Option, ) -> ( @@ -391,8 +389,8 @@ struct ImportManyBlocksResult { /// /// This will yield after each imported block once, to ensure that other futures can /// be called as well. -async fn import_many_blocks, Transaction: Send + 'static>( - import_handle: &mut BoxBlockImport, +async fn import_many_blocks>( + import_handle: &mut BoxBlockImport, blocks_origin: BlockOrigin, blocks: Vec>, verifier: &mut V, @@ -507,14 +505,14 @@ mod tests { import_queue::Verifier, }; use futures::{executor::block_on, Future}; - use sp_test_primitives::{Block, BlockNumber, Extrinsic, Hash, Header}; + use sp_test_primitives::{Block, BlockNumber, Hash, Header}; #[async_trait::async_trait] impl Verifier for () { async fn verify( &mut self, - block: BlockImportParams, - ) -> Result, String> { + block: BlockImportParams, + ) -> Result, String> { Ok(BlockImportParams::new(block.origin, block.header)) } } @@ -522,7 +520,6 @@ mod tests { #[async_trait::async_trait] impl BlockImport for () { type Error = sp_consensus::Error; - type Transaction = Extrinsic; async fn check_block( &mut self, @@ -533,7 +530,7 @@ mod tests { async fn import_block( &mut self, - _block: BlockImportParams, + _block: BlockImportParams, ) -> Result { Ok(ImportResult::imported(true)) } diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index 89588cc7d4c5c..6ee4597541d22 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ 
-13,9 +13,9 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-consensus = { version = "0.10.0-dev", path = "../common" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } diff --git a/client/consensus/grandpa/Cargo.toml b/client/consensus/grandpa/Cargo.toml index 67c58d37a2cf5..af39c640122b6 100644 --- a/client/consensus/grandpa/Cargo.toml +++ b/client/consensus/grandpa/Cargo.toml @@ -15,14 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ahash = "0.8.2" -array-bytes = "4.1" +array-bytes = "6.1" async-trait = "0.1.57" dyn-clone = "1.0" finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } futures = "0.3.21" futures-timer = "3.0.1" log = "0.4.17" -parity-scale-codec = { version = "3.2.2", features = ["derive"] } +parity-scale-codec = { version = "3.6.1", features = ["derive"] } parking_lot = "0.12.1" rand = "0.8.5" serde_json = "1.0.85" @@ -32,6 +32,7 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0. 
sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../transaction-pool/api" } sc-consensus = { version = "0.10.0-dev", path = "../common" } sc-network = { version = "0.10.0-dev", path = "../../network" } sc-network-gossip = { version = "0.10.0-dev", path = "../../network-gossip" } @@ -39,22 +40,22 @@ sc-network-common = { version = "0.10.0-dev", path = "../../network/common" } sc-telemetry = { version = "4.0.0-dev", path = "../../telemetry" } sc-utils = { version = "4.0.0-dev", path = "../../utils" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } -sp-application-crypto = { version = "7.0.0", path = "../../../primitives/application-crypto" } -sp-arithmetic = { version = "6.0.0", path = "../../../primitives/arithmetic" } +sp-application-crypto = { version = "23.0.0", path = "../../../primitives/application-crypto" } +sp-arithmetic = { version = "16.0.0", path = "../../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-consensus-grandpa = { version = "4.0.0-dev", path = "../../../primitives/consensus/grandpa" } -sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-keystore = { version = "0.27.0", path = "../../../primitives/keystore" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } [dev-dependencies] assert_matches = "1.3.0" finality-grandpa = { version = "0.16.2", features = ["derive-codec", "test-helpers"] } 
-serde = "1.0.136" +serde = "1.0.163" tokio = "1.22.0" sc-network = { version = "0.10.0-dev", path = "../../network" } sc-network-test = { version = "0.8.0", path = "../../network/test" } -sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } -sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" } +sp-keyring = { version = "24.0.0", path = "../../../primitives/keyring" } +sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/grandpa/rpc/Cargo.toml b/client/consensus/grandpa/rpc/Cargo.toml index 76a06ad298f77..5c3b5a171b928 100644 --- a/client/consensus/grandpa/rpc/Cargo.toml +++ b/client/consensus/grandpa/rpc/Cargo.toml @@ -14,21 +14,21 @@ finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } futures = "0.3.16" jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } log = "0.4.8" -parity-scale-codec = { version = "3.2.2", features = ["derive"] } -serde = { version = "1.0.105", features = ["derive"] } +parity-scale-codec = { version = "3.6.1", features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } thiserror = "1.0" sc-client-api = { version = "4.0.0-dev", path = "../../../api" } sc-consensus-grandpa = { version = "0.10.0-dev", path = "../" } sc-rpc = { version = "4.0.0-dev", path = "../../../rpc" } sp-blockchain = { version = "4.0.0-dev", path = "../../../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../../../primitives/runtime" } [dev-dependencies] sc-block-builder = { version = "0.10.0-dev", path = "../../../block-builder" } sc-rpc = { version = "4.0.0-dev", features = 
["test-helpers"], path = "../../../rpc" } -sp-core = { version = "7.0.0", path = "../../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../../primitives/core" } sp-consensus-grandpa = { version = "4.0.0-dev", path = "../../../../primitives/consensus/grandpa" } -sp-keyring = { version = "7.0.0", path = "../../../../primitives/keyring" } +sp-keyring = { version = "24.0.0", path = "../../../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } tokio = { version = "1.22.0", features = ["macros"] } diff --git a/client/consensus/grandpa/src/communication/gossip.rs b/client/consensus/grandpa/src/communication/gossip.rs index 2c0fe3d8571e5..5688aff3ea717 100644 --- a/client/consensus/grandpa/src/communication/gossip.rs +++ b/client/consensus/grandpa/src/communication/gossip.rs @@ -87,7 +87,7 @@ use ahash::{AHashMap, AHashSet}; use log::{debug, trace}; -use parity_scale_codec::{Decode, Encode}; +use parity_scale_codec::{Decode, DecodeAll, Encode}; use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; use rand::seq::SliceRandom; use sc_network::{PeerId, ReputationChange}; @@ -546,9 +546,8 @@ impl Peers { who: &PeerId, update: NeighborPacket, ) -> Result>, Misbehavior> { - let peer = match self.inner.get_mut(who) { - None => return Ok(None), - Some(p) => p, + let Some(peer) = self.inner.get_mut(who) else { + return Ok(None) }; let invalid_change = peer.view.set_id > update.set_id || @@ -1040,9 +1039,8 @@ impl Inner { request: CatchUpRequestMessage, set_state: &environment::SharedVoterSetState, ) -> (Option>, Action) { - let local_view = match self.local_view { - None => return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), - Some(ref view) => view, + let Some(local_view) = &self.local_view else { + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) }; if request.set_id != local_view.set_id { @@ -1175,10 +1173,8 
@@ impl Inner { Err(misbehavior) => (misbehavior.cost(), None), }; - let (catch_up, report) = match update_res { - Ok(_) => self.try_catch_up(who), - _ => (None, None), - }; + let (catch_up, report) = + if update_res.is_ok() { self.try_catch_up(who) } else { (None, None) }; let neighbor_topics = topics.unwrap_or_default(); @@ -1435,7 +1431,7 @@ impl GossipValidator { let message_name; let action = { - match GossipMessage::::decode(&mut data) { + match GossipMessage::::decode_all(&mut data) { Ok(GossipMessage::Vote(ref message)) => { message_name = Some("vote"); self.inner.write().validate_round_message(who, message) @@ -1599,9 +1595,8 @@ impl sc_network_gossip::Validator for GossipValidator return false, - Some(x) => x, + let Some((maybe_round, set_id)) = inner.live_topics.topic_info(topic) else { + return false }; if let MessageIntent::Broadcast = intent { @@ -1622,12 +1617,11 @@ impl sc_network_gossip::Validator for GossipValidator v, - None => return false, // cannot evaluate until we have a local view. + let Some(local_view) = &inner.local_view else { + return false // cannot evaluate until we have a local view. }; - match GossipMessage::::decode(&mut data) { + match GossipMessage::::decode_all(&mut data) { Err(_) => false, Ok(GossipMessage::Commit(full)) => { // we only broadcast commit messages if they're for the same @@ -1658,13 +1652,12 @@ impl sc_network_gossip::Validator for GossipValidator {}, }; - let local_view = match inner.local_view { - Some(ref v) => v, - None => return true, // no local view means we can't evaluate or hold any topic. + let Some(local_view) = &inner.local_view else { + return true // no local view means we can't evaluate or hold any topic. }; // global messages -- only keep the best commit. 
- match GossipMessage::::decode(&mut data) { + match GossipMessage::::decode_all(&mut data) { Err(_) => true, Ok(GossipMessage::Commit(full)) => match local_view.last_commit { Some((number, round, set_id)) => @@ -1700,7 +1693,7 @@ mod tests { fn config() -> crate::Config { crate::Config { gossip_duration: Duration::from_millis(10), - justification_period: 256, + justification_generation_period: 256, keystore: None, name: None, local_role: Role::Authority, diff --git a/client/consensus/grandpa/src/communication/mod.rs b/client/consensus/grandpa/src/communication/mod.rs index 9d90035d71cbb..c0749858568f5 100644 --- a/client/consensus/grandpa/src/communication/mod.rs +++ b/client/consensus/grandpa/src/communication/mod.rs @@ -45,7 +45,7 @@ use finality_grandpa::{ voter_set::VoterSet, Message::{Precommit, Prevote, PrimaryPropose}, }; -use parity_scale_codec::{Decode, Encode}; +use parity_scale_codec::{Decode, DecodeAll, Encode}; use sc_network::{NetworkBlock, NetworkSyncForkRequest, ReputationChange}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; @@ -352,7 +352,7 @@ impl, S: Syncing> NetworkBridge { let telemetry = self.telemetry.clone(); let incoming = self.gossip_engine.lock().messages_for(topic).filter_map(move |notification| { - let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); + let decoded = GossipMessage::::decode_all(&mut ¬ification.message[..]); match decoded { Err(ref e) => { @@ -651,7 +651,7 @@ fn incoming_global( .messages_for(topic) .filter_map(|notification| { // this could be optimized by decoding piecewise. 
- let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); + let decoded = GossipMessage::::decode_all(&mut ¬ification.message[..]); if let Err(ref e) = decoded { trace!( target: LOG_TARGET, diff --git a/client/consensus/grandpa/src/communication/periodic.rs b/client/consensus/grandpa/src/communication/periodic.rs index f3f7572864e5c..daa7529202879 100644 --- a/client/consensus/grandpa/src/communication/periodic.rs +++ b/client/consensus/grandpa/src/communication/periodic.rs @@ -108,7 +108,7 @@ impl Stream for NeighborPacketWorker { // // Note: In case poll_unpin is called after the resetted delay fires again, this // will drop one tick. Deemed as very unlikely and also not critical. - while let Poll::Ready(()) = this.delay.poll_unpin(cx) {} + while this.delay.poll_unpin(cx).is_ready() {} if let Some((ref to, ref packet)) = this.last { return Poll::Ready(Some((to.clone(), GossipMessage::::from(packet.clone())))) diff --git a/client/consensus/grandpa/src/communication/tests.rs b/client/consensus/grandpa/src/communication/tests.rs index f97b1f1e88181..10c4772fc76d6 100644 --- a/client/consensus/grandpa/src/communication/tests.rs +++ b/client/consensus/grandpa/src/communication/tests.rs @@ -114,20 +114,14 @@ impl NetworkPeers for TestNetwork { unimplemented!(); } - fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) {} - - fn add_to_peers_set( + fn remove_peers_from_reserved_set( &self, _protocol: ProtocolName, - _peers: HashSet, + _peers: Vec, ) -> Result<(), String> { unimplemented!(); } - fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); - } - fn sync_num_connected(&self) -> usize { unimplemented!(); } @@ -256,7 +250,7 @@ impl Tester { fn config() -> crate::Config { crate::Config { gossip_duration: std::time::Duration::from_millis(10), - justification_period: 256, + justification_generation_period: 256, keystore: None, name: None, local_role: Role::Authority, @@ -681,7 +675,7 @@ fn 
grandpa_protocol_name() { // Create protocol name using random genesis hash. let genesis_hash = sp_core::H256::random(); - let expected = format!("/{}/grandpa/1", array_bytes::bytes2hex("", genesis_hash.as_ref())); + let expected = format!("/{}/grandpa/1", array_bytes::bytes2hex("", genesis_hash)); let proto_name = grandpa_protocol_name::standard_name(&genesis_hash, &chain_spec); assert_eq!(proto_name.to_string(), expected); diff --git a/client/consensus/grandpa/src/environment.rs b/client/consensus/grandpa/src/environment.rs index 67820a59cc943..d3e2beb84e79c 100644 --- a/client/consensus/grandpa/src/environment.rs +++ b/client/consensus/grandpa/src/environment.rs @@ -40,6 +40,8 @@ use sc_client_api::{ utils::is_descendent_of, }; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; +use sp_api::ApiExt; use sp_blockchain::HeaderMetadata; use sp_consensus::SelectChain as SelectChainT; use sp_consensus_grandpa::{ @@ -444,6 +446,7 @@ pub(crate) struct Environment< pub(crate) metrics: Option, pub(crate) justification_sender: Option>, pub(crate) telemetry: Option, + pub(crate) offchain_tx_pool_factory: OffchainTransactionPoolFactory, pub(crate) _phantom: PhantomData, } @@ -570,8 +573,13 @@ where // submit equivocation report at **best** block let equivocation_proof = EquivocationProof::new(authority_set.set_id, equivocation); - self.client - .runtime_api() + let mut runtime_api = self.client.runtime_api(); + + runtime_api.register_extension( + self.offchain_tx_pool_factory.offchain_transaction_pool(best_block_hash), + ); + + runtime_api .submit_report_equivocation_unsigned_extrinsic( best_block_hash, equivocation_proof, @@ -1094,7 +1102,7 @@ where finalize_block( self.client.clone(), &self.authority_set, - Some(self.config.justification_period.into()), + Some(self.config.justification_generation_period), hash, number, (round, commit).into(), @@ -1307,6 +1315,38 @@ where .or_else(|| 
Some((target_header.hash(), *target_header.number())))) } +/// Whether we should process a justification for the given block. +/// +/// This can be used to decide whether to import a justification (when +/// importing a block), or whether to generate a justification from a +/// commit (when validating). Justifications for blocks that change the +/// authority set will always be processed, otherwise we'll only process +/// justifications if the last one was `justification_period` blocks ago. +pub(crate) fn should_process_justification( + client: &Client, + justification_period: u32, + number: NumberFor, + enacts_change: bool, +) -> bool +where + Block: BlockT, + BE: BackendT, + Client: ClientForGrandpa, +{ + if enacts_change { + return true + } + + let last_finalized_number = client.info().finalized_number; + + // keep the first justification before reaching the justification period + if last_finalized_number.is_zero() { + return true + } + + last_finalized_number / justification_period.into() != number / justification_period.into() +} + /// Finalize the given block and apply any authority set changes. If an /// authority set change is enacted then a justification is created (if not /// given) and stored with the block when finalizing it. 
@@ -1314,7 +1354,7 @@ where pub(crate) fn finalize_block( client: Arc, authority_set: &SharedAuthoritySet>, - justification_period: Option>, + justification_generation_period: Option, hash: Block::Hash, number: NumberFor, justification_or_commit: JustificationOrCommit, @@ -1385,22 +1425,13 @@ where let (justification_required, justification) = match justification_or_commit { JustificationOrCommit::Justification(justification) => (true, justification), JustificationOrCommit::Commit((round_number, commit)) => { - let mut justification_required = - // justification is always required when block that enacts new authorities - // set is finalized - status.new_set_block.is_some(); - - // justification is required every N blocks to be able to prove blocks - // finalization to remote nodes - if !justification_required { - if let Some(justification_period) = justification_period { - let last_finalized_number = client.info().finalized_number; - justification_required = (!last_finalized_number.is_zero() || - number - last_finalized_number == justification_period) && - (last_finalized_number / justification_period != - number / justification_period); - } - } + let enacts_change = status.new_set_block.is_some(); + + let justification_required = justification_generation_period + .map(|period| { + should_process_justification(&*client, period, number, enacts_change) + }) + .unwrap_or(enacts_change); let justification = GrandpaJustification::from_commit(&client, round_number, commit)?; diff --git a/client/consensus/grandpa/src/import.rs b/client/consensus/grandpa/src/import.rs index cd13f832ce6dc..8481b39584729 100644 --- a/client/consensus/grandpa/src/import.rs +++ b/client/consensus/grandpa/src/import.rs @@ -28,7 +28,7 @@ use sc_consensus::{ }; use sc_telemetry::TelemetryHandle; use sc_utils::mpsc::TracingUnboundedSender; -use sp_api::{Core, RuntimeApiInfo, TransactionFor}; +use sp_api::{Core, RuntimeApiInfo}; use sp_blockchain::BlockStatus; use sp_consensus::{BlockOrigin, Error 
as ConsensusError, SelectChain}; use sp_consensus_grandpa::{ConsensusLog, GrandpaApi, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; @@ -41,7 +41,7 @@ use sp_runtime::{ use crate::{ authorities::{AuthoritySet, DelayKind, PendingChange, SharedAuthoritySet}, - environment::finalize_block, + environment, justification::GrandpaJustification, notification::GrandpaJustificationSender, AuthoritySetChanges, ClientForGrandpa, CommandOrError, Error, NewAuthoritySet, VoterCommand, @@ -59,6 +59,7 @@ use crate::{ /// object. pub struct GrandpaBlockImport { inner: Arc, + justification_import_period: u32, select_chain: SC, authority_set: SharedAuthoritySet>, send_voter_commands: TracingUnboundedSender>>, @@ -74,6 +75,7 @@ impl Clone fn clone(&self) -> Self { GrandpaBlockImport { inner: self.inner.clone(), + justification_import_period: self.justification_import_period, select_chain: self.select_chain.clone(), authority_set: self.authority_set.clone(), send_voter_commands: self.send_voter_commands.clone(), @@ -232,9 +234,7 @@ where BE: Backend, Client: ClientForGrandpa, Client::Api: GrandpaApi, - for<'a> &'a Client: - BlockImport>, - TransactionFor: 'static, + for<'a> &'a Client: BlockImport, { // check for a new authority set change. fn check_new_change( @@ -271,7 +271,7 @@ where fn make_authorities_changes( &self, - block: &mut BlockImportParams>, + block: &mut BlockImportParams, hash: Block::Hash, initial_sync: bool, ) -> Result, ConsensusError> { @@ -459,7 +459,7 @@ where /// Import whole new state and reset authority set. 
async fn import_state( &mut self, - mut block: BlockImportParams>, + mut block: BlockImportParams, ) -> Result { let hash = block.post_hash(); let number = *block.header.number(); @@ -514,17 +514,14 @@ where BE: Backend, Client: ClientForGrandpa, Client::Api: GrandpaApi, - for<'a> &'a Client: - BlockImport>, - TransactionFor: 'static, + for<'a> &'a Client: BlockImport, SC: Send, { type Error = ConsensusError; - type Transaction = TransactionFor; async fn import_block( &mut self, - mut block: BlockImportParams, + mut block: BlockImportParams, ) -> Result { let hash = block.post_hash(); let number = *block.header.number(); @@ -648,26 +645,39 @@ where match grandpa_justification { Some(justification) => { - let import_res = self.import_justification( - hash, + if environment::should_process_justification( + &*self.inner, + self.justification_import_period, number, - (GRANDPA_ENGINE_ID, justification), needs_justification, - initial_sync, - ); + ) { + let import_res = self.import_justification( + hash, + number, + (GRANDPA_ENGINE_ID, justification), + needs_justification, + initial_sync, + ); - import_res.unwrap_or_else(|err| { - if needs_justification { - debug!( - target: LOG_TARGET, - "Requesting justification from peers due to imported block #{} that enacts authority set change with invalid justification: {}", - number, - err - ); - imported_aux.bad_justification = true; - imported_aux.needs_justification = true; - } - }); + import_res.unwrap_or_else(|err| { + if needs_justification { + debug!( + target: LOG_TARGET, + "Requesting justification from peers due to imported block #{} that enacts authority set change with invalid justification: {}", + number, + err + ); + imported_aux.bad_justification = true; + imported_aux.needs_justification = true; + } + }); + } else { + debug!( + target: LOG_TARGET, + "Ignoring unnecessary justification for block #{}", + number, + ); + } }, None => if needs_justification { @@ -695,6 +705,7 @@ where impl GrandpaBlockImport { 
pub(crate) fn new( inner: Arc, + justification_import_period: u32, select_chain: SC, authority_set: SharedAuthoritySet>, send_voter_commands: TracingUnboundedSender>>, @@ -733,6 +744,7 @@ impl GrandpaBlockImport justification, }; - let result = finalize_block( + let result = environment::finalize_block( self.inner.clone(), &self.authority_set, None, diff --git a/client/consensus/grandpa/src/justification.rs b/client/consensus/grandpa/src/justification.rs index c300a3d7ac43c..a38cb113b40a7 100644 --- a/client/consensus/grandpa/src/justification.rs +++ b/client/consensus/grandpa/src/justification.rs @@ -23,7 +23,7 @@ use std::{ }; use finality_grandpa::{voter_set::VoterSet, Error as GrandpaError}; -use parity_scale_codec::{Decode, Encode}; +use parity_scale_codec::{Decode, DecodeAll, Encode}; use sp_blockchain::{Error as ClientError, HeaderBackend}; use sp_consensus_grandpa::AuthorityId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; @@ -136,7 +136,7 @@ impl GrandpaJustification { where NumberFor: finality_grandpa::BlockNumberOps, { - let justification = GrandpaJustification::::decode(&mut &*encoded) + let justification = GrandpaJustification::::decode_all(&mut &*encoded) .map_err(|_| ClientError::JustificationDecode)?; if ( diff --git a/client/consensus/grandpa/src/lib.rs b/client/consensus/grandpa/src/lib.rs index c11e873eca738..da621abd254ca 100644 --- a/client/consensus/grandpa/src/lib.rs +++ b/client/consensus/grandpa/src/lib.rs @@ -64,12 +64,12 @@ use prometheus_endpoint::{PrometheusError, Registry}; use sc_client_api::{ backend::{AuxStore, Backend}, utils::is_descendent_of, - BlockchainEvents, CallExecutor, ExecutionStrategy, ExecutorProvider, Finalizer, LockImportRun, - StorageProvider, TransactionFor, + BlockchainEvents, CallExecutor, ExecutorProvider, Finalizer, LockImportRun, StorageProvider, }; use sc_consensus::BlockImport; use sc_network::types::ProtocolName; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, 
CONSENSUS_INFO}; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; use sp_api::ProvideRuntimeApi; use sp_application_crypto::AppCrypto; @@ -214,10 +214,10 @@ impl Clone for SharedVoterState { pub struct Config { /// The expected duration for a message to be gossiped across the network. pub gossip_duration: Duration, - /// Justification generation period (in blocks). GRANDPA will try to generate justifications - /// at least every justification_period blocks. There are some other events which might cause - /// justification generation. - pub justification_period: u32, + /// Justification generation period (in blocks). GRANDPA will try to generate + /// justifications at least every justification_generation_period blocks. There + /// are some other events which might cause justification generation. + pub justification_generation_period: u32, /// Whether the GRANDPA observer protocol is live on the network and thereby /// a full-node not running as a validator is running the GRANDPA observer /// protocol (we will only issue catch-up requests to authorities when the @@ -308,7 +308,7 @@ pub trait ClientForGrandpa: + BlockchainEvents + ProvideRuntimeApi + ExecutorProvider - + BlockImport, Error = sp_consensus::Error> + + BlockImport + StorageProvider where BE: Backend, @@ -328,7 +328,7 @@ where + BlockchainEvents + ProvideRuntimeApi + ExecutorProvider - + BlockImport, Error = sp_consensus::Error> + + BlockImport + StorageProvider, { } @@ -479,7 +479,6 @@ where self.expect_block_hash_from_id(&BlockId::Number(Zero::zero()))?, "GrandpaApi_grandpa_authorities", &[], - ExecutionStrategy::NativeElseWasm, CallContext::Offchain, ) .and_then(|call_result| { @@ -495,8 +494,16 @@ where /// Make block importer and link half necessary to tie the background voter /// to it. +/// +/// The `justification_import_period` sets the minimum period on which +/// justifications will be imported. 
When importing a block, if it includes a +/// justification it will only be processed if it fits within this period, +/// otherwise it will be ignored (and won't be validated). This is to avoid +/// slowing down sync by a peer serving us unnecessary justifications which +/// aren't trivial to validate. pub fn block_import( client: Arc, + justification_import_period: u32, genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, select_chain: SC, telemetry: Option, @@ -508,6 +515,7 @@ where { block_import_with_authority_set_hard_forks( client, + justification_import_period, genesis_authorities_provider, select_chain, Default::default(), @@ -540,6 +548,7 @@ pub struct AuthoritySetHardFork { /// given static authorities. pub fn block_import_with_authority_set_hard_forks( client: Arc, + justification_import_period: u32, genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, select_chain: SC, authority_set_hard_forks: Vec>, @@ -599,6 +608,7 @@ where Ok(( GrandpaBlockImport::new( client.clone(), + justification_import_period, select_chain.clone(), persistent_data.authority_set.clone(), voter_commands_tx, @@ -688,6 +698,11 @@ pub struct GrandpaParams { pub shared_voter_state: SharedVoterState, /// TelemetryHandle instance. pub telemetry: Option, + /// Offchain transaction pool factory. + /// + /// This will be used to create an offchain transaction pool instance for sending an + /// equivocation report from the runtime. 
+ pub offchain_tx_pool_factory: OffchainTransactionPoolFactory, } /// Returns the configuration value to put in @@ -718,7 +733,6 @@ pub fn run_grandpa_voter( grandpa_params: GrandpaParams, ) -> sp_blockchain::Result + Send> where - Block::Hash: Ord, BE: Backend + 'static, N: NetworkT + Sync + 'static, S: SyncingT + Sync + 'static, @@ -737,6 +751,7 @@ where prometheus_registry, shared_voter_state, telemetry, + offchain_tx_pool_factory, } = grandpa_params; // NOTE: we have recently removed `run_grandpa_observer` from the public @@ -811,6 +826,7 @@ where shared_voter_state, justification_sender, telemetry, + offchain_tx_pool_factory, ); let voter_work = voter_work.map(|res| match res { @@ -880,6 +896,7 @@ where shared_voter_state: SharedVoterState, justification_sender: GrandpaJustificationSender, telemetry: Option, + offchain_tx_pool_factory: OffchainTransactionPoolFactory, ) -> Self { let metrics = match prometheus_registry.as_ref().map(Metrics::register) { Some(Ok(metrics)) => Some(metrics), @@ -904,6 +921,7 @@ where metrics: metrics.as_ref().map(|m| m.environment.clone()), justification_sender: Some(justification_sender), telemetry: telemetry.clone(), + offchain_tx_pool_factory, _phantom: PhantomData, }); @@ -1055,6 +1073,7 @@ where metrics: self.env.metrics.clone(), justification_sender: self.env.justification_sender.clone(), telemetry: self.telemetry.clone(), + offchain_tx_pool_factory: self.env.offchain_tx_pool_factory.clone(), _phantom: PhantomData, }); diff --git a/client/consensus/grandpa/src/tests.rs b/client/consensus/grandpa/src/tests.rs index c46e249be485c..0175f7d1b473c 100644 --- a/client/consensus/grandpa/src/tests.rs +++ b/client/consensus/grandpa/src/tests.rs @@ -33,6 +33,7 @@ use sc_network_test::{ Block, BlockImportAdapter, FullPeerConfig, Hash, PassThroughVerifier, Peer, PeersClient, PeersFullClient, TestClient, TestNetFactory, }; +use sc_transaction_pool_api::RejectAllTxPool; use sp_api::{ApiRef, ProvideRuntimeApi}; use 
sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; use sp_consensus_grandpa::{ @@ -68,6 +69,8 @@ type GrandpaBlockImport = crate::GrandpaBlockImport< LongestChain, >; +const JUSTIFICATION_IMPORT_PERIOD: u32 = 32; + #[derive(Default)] struct GrandpaTestNet { peers: Vec, @@ -125,6 +128,7 @@ impl TestNetFactory for GrandpaTestNet { let (client, backend) = (client.as_client(), client.as_backend()); let (import, link) = block_import( client.clone(), + JUSTIFICATION_IMPORT_PERIOD, &self.test_config, LongestChain::new(backend.clone()), None, @@ -317,7 +321,7 @@ fn initialize_grandpa( let grandpa_params = GrandpaParams { config: Config { gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, + justification_generation_period: 32, keystore: Some(keystore), name: Some(format!("peer#{}", peer_id)), local_role: Role::Authority, @@ -331,6 +335,9 @@ fn initialize_grandpa( voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new( + RejectAllTxPool::default(), + ), telemetry: None, }; let voter = @@ -442,11 +449,14 @@ async fn finalize_3_voters_no_observers() { let net = Arc::new(Mutex::new(net)); run_to_completion(20, net.clone(), peers).await; - // normally there's no justification for finalized blocks - assert!( - net.lock().peer(0).client().justifications(hashof20).unwrap().is_none(), - "Extra justification for block#1", - ); + // all peers should have stored the justification for the best finalized block #20 + for peer_id in 0..3 { + let client = net.lock().peers[peer_id].client().as_client(); + let justification = + crate::aux_schema::best_justification::<_, Block>(&*client).unwrap().unwrap(); + + assert_eq!(justification.justification.commit.target_number, 20); + } } #[tokio::test] @@ -466,7 +476,7 @@ async fn finalize_3_voters_1_full_observer() { let grandpa_params = GrandpaParams { config: Config { gossip_duration: TEST_GOSSIP_DURATION, - 
justification_period: 32, + justification_generation_period: 32, keystore: None, name: Some(format!("peer#{}", peer_id)), local_role: Role::Authority, @@ -481,6 +491,9 @@ async fn finalize_3_voters_1_full_observer() { prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), telemetry: None, + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new( + RejectAllTxPool::default(), + ), }; run_grandpa_voter(grandpa_params).expect("all in order with client and network") @@ -558,7 +571,7 @@ async fn transition_3_voters_twice_1_full_observer() { let grandpa_params = GrandpaParams { config: Config { gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, + justification_generation_period: 32, keystore: Some(keystore), name: Some(format!("peer#{}", peer_id)), local_role: Role::Authority, @@ -573,6 +586,9 @@ async fn transition_3_voters_twice_1_full_observer() { prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), telemetry: None, + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new( + RejectAllTxPool::default(), + ), }; voters @@ -685,8 +701,8 @@ async fn justification_is_generated_periodically() { let net = Arc::new(Mutex::new(net)); run_to_completion(32, net.clone(), peers).await; - // when block#32 (justification_period) is finalized, justification - // is required => generated + // when block#32 (justification_generation_period) is finalized, + // justification is required => generated for i in 0..3 { assert!(net.lock().peer(i).client().justifications(hashof32).unwrap().is_some()); } @@ -983,7 +999,7 @@ async fn voter_persists_its_votes() { let bob_network = { let config = Config { gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, + justification_generation_period: 32, keystore: Some(bob_keystore.clone()), name: Some(format!("peer#{}", 1)), local_role: Role::Authority, @@ -1025,7 +1041,7 @@ async fn voter_persists_its_votes() { let grandpa_params = GrandpaParams { config: Config { 
gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, + justification_generation_period: 32, keystore: Some(keystore), name: Some(format!("peer#{}", 0)), local_role: Role::Authority, @@ -1040,6 +1056,9 @@ async fn voter_persists_its_votes() { prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), telemetry: None, + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new( + RejectAllTxPool::default(), + ), }; run_grandpa_voter(grandpa_params).expect("all in order with client and network") @@ -1068,7 +1087,7 @@ async fn voter_persists_its_votes() { let grandpa_params = GrandpaParams { config: Config { gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, + justification_generation_period: 32, keystore: Some(keystore), name: Some(format!("peer#{}", 0)), local_role: Role::Authority, @@ -1083,6 +1102,9 @@ async fn voter_persists_its_votes() { prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), telemetry: None, + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new( + RejectAllTxPool::default(), + ), }; run_grandpa_voter(grandpa_params) @@ -1230,7 +1252,7 @@ async fn finalize_3_voters_1_light_observer() { let observer = observer::run_grandpa_observer( Config { gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, + justification_generation_period: 32, keystore: None, name: Some("observer".to_string()), local_role: Role::Full, @@ -1278,7 +1300,7 @@ async fn voter_catches_up_to_latest_round_when_behind() { let grandpa_params = GrandpaParams { config: Config { gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, + justification_generation_period: 32, keystore, name: Some(format!("peer#{}", peer_id)), local_role: Role::Authority, @@ -1293,6 +1315,9 @@ async fn voter_catches_up_to_latest_round_when_behind() { prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), telemetry: None, + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new( + 
RejectAllTxPool::default(), + ), }; Box::pin(run_grandpa_voter(grandpa_params).expect("all in order with client and network")) @@ -1390,7 +1415,7 @@ where let config = Config { gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, + justification_generation_period: 32, keystore, name: None, local_role: Role::Authority, @@ -1422,6 +1447,7 @@ where justification_sender: None, telemetry: None, _phantom: PhantomData, + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(RejectAllTxPool::default()), } } @@ -1883,24 +1909,19 @@ async fn imports_justification_for_regular_blocks_on_import() { let client = net.peer(0).client().clone(); let (mut block_import, ..) = net.make_block_import(client.clone()); - let full_client = client.as_client(); - let builder = full_client - .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false) - .unwrap(); - let block = builder.build().unwrap().block; - let block_hash = block.hash(); + // create a new block (without importing it) + let generate_block = |parent| { + let builder = full_client.new_block_at(parent, Default::default(), false).unwrap(); + builder.build().unwrap().block + }; // create a valid justification, with one precommit targeting the block - let justification = { - let round = 1; + let make_justification = |round, hash, number| { let set_id = 0; - let precommit = finality_grandpa::Precommit { - target_hash: block_hash, - target_number: *block.header.number(), - }; + let precommit = finality_grandpa::Precommit { target_hash: hash, target_number: number }; let msg = finality_grandpa::Message::Precommit(precommit.clone()); let encoded = sp_consensus_grandpa::localized_payload(round, set_id, &msg); @@ -1913,33 +1934,59 @@ async fn imports_justification_for_regular_blocks_on_import() { }; let commit = finality_grandpa::Commit { - target_hash: block_hash, - target_number: *block.header.number(), + target_hash: hash, + target_number: number, precommits: vec![precommit], }; 
GrandpaJustification::from_commit(&full_client, round, commit).unwrap() }; - // we import the block with justification attached - let mut import = BlockImportParams::new(BlockOrigin::File, block.header); - import.justifications = Some((GRANDPA_ENGINE_ID, justification.encode()).into()); - import.body = Some(block.extrinsics); - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + let mut generate_and_import_block_with_justification = |parent| { + // we import the block with justification attached + let block = generate_block(parent); + let block_hash = block.hash(); + let justification = make_justification(1, block_hash, *block.header.number()); - assert_eq!( - block_import.import_block(import).await.unwrap(), - ImportResult::Imported(ImportedAux { - needs_justification: false, - clear_justification_requests: false, - bad_justification: false, - is_new_best: true, - ..Default::default() - }), - ); + let mut import = BlockImportParams::new(BlockOrigin::File, block.header); + import.justifications = Some((GRANDPA_ENGINE_ID, justification.encode()).into()); + import.body = Some(block.extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + assert_eq!( + // NOTE: we use `block_on` here because async closures are + // unsupported and it doesn't matter if we block in a test + futures::executor::block_on(block_import.import_block(import)).unwrap(), + ImportResult::Imported(ImportedAux { + needs_justification: false, + clear_justification_requests: false, + bad_justification: false, + is_new_best: true, + ..Default::default() + }), + ); + + block_hash + }; + + let block1 = + generate_and_import_block_with_justification(full_client.chain_info().genesis_hash); // the justification should be imported and available from the client - assert!(client.justifications(block_hash).unwrap().is_some()); + assert!(client.justifications(block1).unwrap().is_some()); + + // subsequent justifications should be ignored and not imported + let mut parent = 
block1; + for _ in 2..JUSTIFICATION_IMPORT_PERIOD { + parent = generate_and_import_block_with_justification(parent); + assert!(client.justifications(parent).unwrap().is_none()); + } + + let block32 = generate_and_import_block_with_justification(parent); + + // until we reach a block in the next justification import period, at + // which point we should import it + assert!(client.justifications(block32).unwrap().is_some()); } #[tokio::test] @@ -1986,7 +2033,7 @@ async fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() { // keys it should work equivocation.identity = TryFrom::try_from(&[1; 32][..]).unwrap(); let equivocation_proof = sp_consensus_grandpa::Equivocation::Prevote(equivocation); - assert!(environment.report_equivocation(equivocation_proof).is_ok()); + environment.report_equivocation(equivocation_proof).unwrap(); } #[tokio::test] diff --git a/client/consensus/grandpa/src/warp_proof.rs b/client/consensus/grandpa/src/warp_proof.rs index ec2d25c328bf8..9acf1f2187793 100644 --- a/client/consensus/grandpa/src/warp_proof.rs +++ b/client/consensus/grandpa/src/warp_proof.rs @@ -16,7 +16,7 @@ //! Utilities for generating and verifying GRANDPA warp sync proofs. -use sp_runtime::codec::{self, Decode, Encode}; +use parity_scale_codec::{Decode, DecodeAll, Encode}; use crate::{ best_justification, find_scheduled_change, AuthoritySetChanges, AuthoritySetHardFork, @@ -38,7 +38,7 @@ use std::{collections::HashMap, sync::Arc}; pub enum Error { /// Decoding error. #[error("Failed to decode block hash: {0}.")] - DecodeScale(#[from] codec::Error), + DecodeScale(#[from] parity_scale_codec::Error), /// Client backend error. 
#[error("{0}")] Client(#[from] sp_blockchain::Error), @@ -137,7 +137,7 @@ impl WarpSyncProof { .and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)) .ok_or_else(|| Error::MissingData)?; - let justification = GrandpaJustification::::decode(&mut &justification[..])?; + let justification = GrandpaJustification::::decode_all(&mut &justification[..])?; let proof = WarpSyncFragment { header: header.clone(), justification }; let proof_size = proof.encoded_size(); @@ -291,7 +291,7 @@ where authorities: AuthorityList, ) -> Result, Box> { let EncodedProof(proof) = proof; - let proof = WarpSyncProof::::decode(&mut proof.as_slice()) + let proof = WarpSyncProof::::decode_all(&mut proof.as_slice()) .map_err(|e| format!("Proof decoding error: {:?}", e))?; let last_header = proof .proofs @@ -318,8 +318,9 @@ where #[cfg(test)] mod tests { - use super::{codec::Encode, WarpSyncProof}; + use super::WarpSyncProof; use crate::{AuthoritySetChanges, GrandpaJustification}; + use parity_scale_codec::Encode; use rand::prelude::*; use sc_block_builder::BlockBuilderProvider; use sp_blockchain::HeaderBackend; diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 2ddcf0d772890..4bc389ac3f4cd 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } assert_matches = "1.3.0" async-trait = "0.1.57" -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" futures-timer = "3.0.1" log = "0.4.17" @@ -36,10 +36,10 @@ sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/c sp-consensus-aura = { version = "0.10.0-dev", path = "../../../primitives/consensus/aura" } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } 
sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-keystore = { version = "0.27.0", path = "../../../primitives/keystore" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } [dev-dependencies] diff --git a/client/consensus/manual-seal/src/consensus.rs b/client/consensus/manual-seal/src/consensus.rs index b54ec5e41b750..2cc2b902b1ce9 100644 --- a/client/consensus/manual-seal/src/consensus.rs +++ b/client/consensus/manual-seal/src/consensus.rs @@ -30,9 +30,6 @@ pub mod timestamp; /// Consensus data provider, manual seal uses this trait object for authoring blocks valid /// for any runtime. pub trait ConsensusDataProvider: Send + Sync { - /// Block import transaction type - type Transaction; - /// The proof type. 
type Proof; @@ -43,7 +40,7 @@ pub trait ConsensusDataProvider: Send + Sync { fn append_block_import( &self, parent: &B::Header, - params: &mut BlockImportParams, + params: &mut BlockImportParams, inherents: &InherentData, proof: Self::Proof, ) -> Result<(), Error>; diff --git a/client/consensus/manual-seal/src/consensus/aura.rs b/client/consensus/manual-seal/src/consensus/aura.rs index 92203f91826f0..566a2266c701b 100644 --- a/client/consensus/manual-seal/src/consensus/aura.rs +++ b/client/consensus/manual-seal/src/consensus/aura.rs @@ -22,7 +22,7 @@ use crate::{ConsensusDataProvider, Error}; use sc_client_api::{AuxStore, UsageProvider}; use sc_consensus::BlockImportParams; -use sp_api::{ProvideRuntimeApi, TransactionFor}; +use sp_api::ProvideRuntimeApi; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_consensus_aura::{ digests::CompatibleDigestItem, @@ -69,7 +69,6 @@ where C::Api: AuraApi, P: Send + Sync, { - type Transaction = TransactionFor; type Proof = P; fn create_digest( @@ -92,7 +91,7 @@ where fn append_block_import( &self, _parent: &B::Header, - _params: &mut BlockImportParams, + _params: &mut BlockImportParams, _inherents: &InherentData, _proof: Self::Proof, ) -> Result<(), Error> { diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 2485bd603e785..26fa81459808c 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -33,7 +33,7 @@ use sp_keystore::KeystorePtr; use std::{marker::PhantomData, sync::Arc}; use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; -use sp_api::{ProvideRuntimeApi, TransactionFor}; +use sp_api::ProvideRuntimeApi; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_consensus_babe::{ digests::{NextEpochDescriptor, PreDigest, SecondaryPlainPreDigest}, @@ -97,8 +97,8 @@ where { async fn verify( &mut self, - mut import_params: BlockImportParams, - ) -> Result, String> 
{ + mut import_params: BlockImportParams, + ) -> Result, String> { import_params.finalized = false; import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); @@ -197,7 +197,6 @@ where C::Api: BabeApi, P: Send + Sync, { - type Transaction = TransactionFor; type Proof = P; fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result { @@ -264,7 +263,7 @@ where fn append_block_import( &self, parent: &B::Header, - params: &mut BlockImportParams, + params: &mut BlockImportParams, inherents: &InherentData, _proof: Self::Proof, ) -> Result<(), Error> { diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 03c9418b5c560..c3b891b84e893 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -52,7 +52,7 @@ pub use self::{ seal_block::{seal_block, SealBlockParams, MAX_PROPOSAL_DURATION}, }; use sc_transaction_pool_api::TransactionPool; -use sp_api::{ProvideRuntimeApi, TransactionFor}; +use sp_api::ProvideRuntimeApi; const LOG_TARGET: &str = "manual-seal"; @@ -66,8 +66,8 @@ struct ManualSealVerifier; impl Verifier for ManualSealVerifier { async fn verify( &mut self, - mut block: BlockImportParams, - ) -> Result, String> { + mut block: BlockImportParams, + ) -> Result, String> { block.finalized = false; block.fork_choice = Some(ForkChoiceStrategy::LongestChain); Ok(block) @@ -75,14 +75,13 @@ impl Verifier for ManualSealVerifier { } /// Instantiate the import queue for the manual seal consensus engine. 
-pub fn import_queue( - block_import: BoxBlockImport, +pub fn import_queue( + block_import: BoxBlockImport, spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, -) -> BasicQueue +) -> BasicQueue where Block: BlockT, - Transaction: Send + Sync + 'static, { BasicQueue::new(ManualSealVerifier, block_import, None, spawner, registry) } @@ -109,8 +108,7 @@ pub struct ManualSealParams, TP, SC, C pub select_chain: SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: - Option>>>, + pub consensus_data_provider: Option>>, /// Something that can create the inherent data providers. pub create_inherent_data_providers: CIDP, @@ -134,8 +132,7 @@ pub struct InstantSealParams, TP, SC, pub select_chain: SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: - Option>>>, + pub consensus_data_provider: Option>>, /// Something that can create the inherent data providers. pub create_inherent_data_providers: CIDP, @@ -167,17 +164,13 @@ pub async fn run_manual_seal( }: ManualSealParams, ) where B: BlockT + 'static, - BI: BlockImport> - + Send - + Sync - + 'static, + BI: BlockImport + Send + Sync + 'static, C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Proposer: Proposer>, + E::Proposer: Proposer, CS: Stream::Hash>> + Unpin + 'static, SC: SelectChain + 'static, - TransactionFor: 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, P: Send + Sync + 'static, @@ -230,16 +223,12 @@ pub async fn run_instant_seal( }: InstantSealParams, ) where B: BlockT + 'static, - BI: BlockImport> - + Send - + Sync - + 'static, + BI: BlockImport + Send + Sync + 'static, C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Proposer: Proposer>, + E::Proposer: Proposer, SC: SelectChain + 'static, - TransactionFor: 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, P: 
Send + Sync + 'static, @@ -284,16 +273,12 @@ pub async fn run_instant_seal_and_finalize( }: InstantSealParams, ) where B: BlockT + 'static, - BI: BlockImport> - + Send - + Sync - + 'static, + BI: BlockImport + Send + Sync + 'static, C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Proposer: Proposer>, + E::Proposer: Proposer, SC: SelectChain + 'static, - TransactionFor: 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, P: Send + Sync + 'static, @@ -386,7 +371,6 @@ mod tests { B: BlockT, C: ProvideRuntimeApi + Send + Sync, { - type Transaction = TransactionFor; type Proof = (); fn create_digest( @@ -400,7 +384,7 @@ mod tests { fn append_block_import( &self, _parent: &B::Header, - params: &mut BlockImportParams, + params: &mut BlockImportParams, _inherents: &InherentData, _proof: Self::Proof, ) -> Result<(), Error> { diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index e6133bccae885..4b6230c3efc3d 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -22,7 +22,7 @@ use crate::{rpc, ConsensusDataProvider, CreatedBlock, Error}; use futures::prelude::*; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction}; use sc_transaction_pool_api::TransactionPool; -use sp_api::{ProvideRuntimeApi, TransactionFor}; +use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_consensus::{self, BlockOrigin, Environment, Proposer, SelectChain}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; @@ -52,8 +52,7 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP /// SelectChain object pub select_chain: &'a SC, /// Digest provider for inclusion in blocks. 
- pub consensus_data_provider: - Option<&'a dyn ConsensusDataProvider>>, + pub consensus_data_provider: Option<&'a dyn ConsensusDataProvider>, /// block import object pub block_import: &'a mut BI, /// Something that can create the inherent data providers. @@ -77,16 +76,12 @@ pub async fn seal_block( }: SealBlockParams<'_, B, BI, SC, C, E, TP, CIDP, P>, ) where B: BlockT, - BI: BlockImport> - + Send - + Sync - + 'static, + BI: BlockImport + Send + Sync + 'static, C: HeaderBackend + ProvideRuntimeApi, E: Environment, - E::Proposer: Proposer>, + E::Proposer: Proposer, TP: TransactionPool, SC: SelectChain, - TransactionFor: 'static, CIDP: CreateInherentDataProviders, P: Send + Sync + 'static, { diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index b5454e35f994e..91c9754508591 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.57" -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.1" log = "0.4.17" @@ -28,6 +28,6 @@ sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-bu sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-consensus-pow = { version = "0.10.0-dev", path = "../../../primitives/consensus/pow" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } diff --git a/client/consensus/pow/src/lib.rs 
b/client/consensus/pow/src/lib.rs index 913686b7bf36d..ee5c1dfc6f11a 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -58,7 +58,6 @@ use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::HeaderBackend; use sp_consensus::{Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle}; use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; -use sp_core::ExecutionContext; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_runtime::{ generic::{BlockId, Digest, DigestItem}, @@ -238,7 +237,7 @@ impl Clone impl PowBlockImport where B: BlockT, - I: BlockImport> + Send + Sync, + I: BlockImport + Send + Sync, I::Error: Into, C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + BlockOf, C::Api: BlockBuilderApi, @@ -269,7 +268,6 @@ where block: B, at_hash: B::Hash, inherent_data_providers: CIDP::InherentDataProviders, - execution_context: ExecutionContext, ) -> Result<(), Error> { if *block.header().number() < self.check_inherents_after { return Ok(()) @@ -283,7 +281,7 @@ where let inherent_res = self .client .runtime_api() - .check_inherents_with_context(at_hash, execution_context, block, inherent_data) + .check_inherents(at_hash, block, inherent_data) .map_err(|e| Error::Client(e.into()))?; if !inherent_res.ok() { @@ -303,7 +301,7 @@ where impl BlockImport for PowBlockImport where B: BlockT, - I: BlockImport> + Send + Sync, + I: BlockImport + Send + Sync, I::Error: Into, S: SelectChain, C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + BlockOf, @@ -313,7 +311,6 @@ where CIDP: CreateInherentDataProviders + Send + Sync, { type Error = ConsensusError; - type Transaction = sp_api::TransactionFor; async fn check_block( &mut self, @@ -324,7 +321,7 @@ where async fn import_block( &mut self, - mut block: BlockImportParams, + mut block: BlockImportParams, ) -> Result { let best_header = self .select_chain @@ -348,7 +345,6 @@ where 
self.create_inherent_data_providers .create_inherent_data_providers(parent_hash, ()) .await?, - block.origin.into(), ) .await?; } @@ -447,8 +443,8 @@ where { async fn verify( &mut self, - mut block: BlockImportParams, - ) -> Result, String> { + mut block: BlockImportParams, + ) -> Result, String> { let hash = block.header.hash(); let (checked_header, seal) = self.check_header(block.header)?; @@ -463,19 +459,18 @@ where } /// The PoW import queue type. -pub type PowImportQueue = BasicQueue; +pub type PowImportQueue = BasicQueue; /// Import queue for PoW engine. -pub fn import_queue( - block_import: BoxBlockImport, +pub fn import_queue( + block_import: BoxBlockImport, justification_import: Option>, algorithm: Algorithm, spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, -) -> Result, sp_consensus::Error> +) -> Result, sp_consensus::Error> where B: BlockT, - Transaction: Send + Sync + 'static, Algorithm: PowAlgorithm + Clone + Send + Sync + 'static, Algorithm::Difficulty: Send, { @@ -494,7 +489,7 @@ where /// `pre_runtime` is a parameter that allows a custom additional pre-runtime digest to be inserted /// for blocks being built. This can encode authorship information, or just be a graffiti. 
pub fn start_mining_worker( - block_import: BoxBlockImport>, + block_import: BoxBlockImport, client: Arc, select_chain: S, algorithm: Algorithm, @@ -506,18 +501,18 @@ pub fn start_mining_worker( timeout: Duration, build_time: Duration, ) -> ( - MiningHandle>::Proof>, + MiningHandle>::Proof>, impl Future, ) where Block: BlockT, - C: ProvideRuntimeApi + BlockchainEvents + 'static, + C: BlockchainEvents + 'static, S: SelectChain + 'static, Algorithm: PowAlgorithm + Clone, Algorithm::Difficulty: Send + 'static, E: Environment + Send + Sync + 'static, E::Error: std::fmt::Debug, - E::Proposer: Proposer>, + E::Proposer: Proposer, SO: SyncOracle + Clone + Send + Sync + 'static, L: sc_consensus::JustificationSyncLink, CIDP: CreateInherentDataProviders, @@ -635,7 +630,7 @@ where }, }; - let build = MiningBuild:: { + let build = MiningBuild:: { metadata: MiningMetadata { best_hash, pre_hash: proposal.block.header().hash(), diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index 3cb5dfcc09260..9e9c4fc137d86 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -56,16 +56,11 @@ pub struct MiningMetadata { } /// A build of mining, containing the metadata and the block proposal. -pub struct MiningBuild< - Block: BlockT, - Algorithm: PowAlgorithm, - C: sp_api::ProvideRuntimeApi, - Proof, -> { +pub struct MiningBuild, Proof> { /// Mining metadata. pub metadata: MiningMetadata, /// Mining proposal. - pub proposal: Proposal, Proof>, + pub proposal: Proposal, } /// Version of the mining worker. 
@@ -76,25 +71,22 @@ pub struct Version(usize); pub struct MiningHandle< Block: BlockT, Algorithm: PowAlgorithm, - C: sp_api::ProvideRuntimeApi, L: sc_consensus::JustificationSyncLink, Proof, > { version: Arc, algorithm: Arc, justification_sync_link: Arc, - build: Arc>>>, - block_import: Arc>>>, + build: Arc>>>, + block_import: Arc>>, } -impl MiningHandle +impl MiningHandle where Block: BlockT, - C: sp_api::ProvideRuntimeApi, Algorithm: PowAlgorithm, Algorithm::Difficulty: 'static + Send, L: sc_consensus::JustificationSyncLink, - sp_api::TransactionFor: Send + 'static, { fn increment_version(&self) { self.version.fetch_add(1, Ordering::SeqCst); @@ -102,7 +94,7 @@ where pub(crate) fn new( algorithm: Algorithm, - block_import: BoxBlockImport>, + block_import: BoxBlockImport, justification_sync_link: L, ) -> Self { Self { @@ -120,7 +112,7 @@ where self.increment_version(); } - pub(crate) fn on_build(&self, value: MiningBuild) { + pub(crate) fn on_build(&self, value: MiningBuild) { let mut build = self.build.lock(); *build = Some(value); self.increment_version(); @@ -224,11 +216,10 @@ where } } -impl Clone for MiningHandle +impl Clone for MiningHandle where Block: BlockT, Algorithm: PowAlgorithm, - C: sp_api::ProvideRuntimeApi, L: sc_consensus::JustificationSyncLink, { fn clone(&self) -> Self { diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 5cacf4f476281..67eeae5317abb 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -15,21 +15,21 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.57" -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" futures-timer = "3.0.1" log = "0.4.17" sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-telemetry = { version = "4.0.0-dev", path = 
"../../telemetry" } -sp-arithmetic = { version = "6.0.0", path = "../../../primitives/arithmetic" } +sp-arithmetic = { version = "16.0.0", path = "../../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.13.0", path = "../../../primitives/state-machine" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.28.0", path = "../../../primitives/state-machine" } [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 5057e7858a0bc..5ee93d1686437 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -41,7 +41,7 @@ use sp_arithmetic::traits::BaseArithmetic; use sp_consensus::{Proposal, Proposer, SelectChain, SyncOracle}; use sp_consensus_slots::{Slot, SlotDuration}; use sp_inherents::CreateInherentDataProviders; -use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT}; +use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT}; use std::{ fmt::Debug, ops::Deref, @@ -53,8 +53,7 @@ const LOG_TARGET: &str = "slots"; /// The changes that need to applied to the storage to create the state for a block. /// /// See [`sp_state_machine::StorageChanges`] for more information. 
-pub type StorageChanges = - sp_state_machine::StorageChanges>; +pub type StorageChanges = sp_state_machine::StorageChanges>; /// The result of [`SlotWorker::on_slot`]. #[derive(Debug, Clone)] @@ -84,9 +83,7 @@ pub trait SlotWorker { #[async_trait::async_trait] pub trait SimpleSlotWorker { /// A handle to a `BlockImport`. - type BlockImport: BlockImport>::Transaction> - + Send - + 'static; + type BlockImport: BlockImport + Send + 'static; /// A handle to a `SyncOracle`. type SyncOracle: SyncOracle; @@ -148,13 +145,10 @@ pub trait SimpleSlotWorker { header: B::Header, header_hash: &B::Hash, body: Vec, - storage_changes: StorageChanges<>::Transaction, B>, + storage_changes: StorageChanges, public: Self::Claim, aux_data: Self::AuxData, - ) -> Result< - sc_consensus::BlockImportParams>::Transaction>, - sp_consensus::Error, - >; + ) -> Result, sp_consensus::Error>; /// Whether to force authoring if offline. fn force_authoring(&self) -> bool; @@ -191,13 +185,7 @@ pub trait SimpleSlotWorker { claim: &Self::Claim, slot_info: SlotInfo, end_proposing_at: Instant, - ) -> Option< - Proposal< - B, - >::Transaction, - >::Proof, - >, - > { + ) -> Option>::Proof>> { let slot = slot_info.slot; let telemetry = self.telemetry(); let log_target = self.logging_target(); diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index aeb31f9444faf..1845158dac112 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } hash-db = "0.16.0" @@ -27,13 +27,13 @@ parking_lot = "0.12.1" sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-state-db = { version = "0.10.0-dev", path = "../state-db" } schnellru = "0.2.1" -sp-arithmetic = { version = "6.0.0", path = "../../primitives/arithmetic" } +sp-arithmetic = { version = "16.0.0", 
path = "../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } sp-database = { version = "4.0.0-dev", path = "../../primitives/database" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } -sp-state-machine = { version = "0.13.0", path = "../../primitives/state-machine" } -sp-trie = { version = "7.0.0", path = "../../primitives/trie" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } +sp-state-machine = { version = "0.28.0", path = "../../primitives/state-machine" } +sp-trie = { version = "22.0.0", path = "../../primitives/trie" } [dev-dependencies] criterion = "0.4.0" @@ -42,14 +42,17 @@ rand = "0.8.5" tempfile = "3.1.0" quickcheck = { version = "1.0.3", default-features = false } kitchensink-runtime = { path = "../../bin/node/runtime" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -array-bytes = "4.1" +array-bytes = "6.1" [features] default = [] test-helpers = [] -runtime-benchmarks = [] +runtime-benchmarks = [ + "kitchensink-runtime/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] rocksdb = ["kvdb-rocksdb"] [[bench]] diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index b1fe3f206f58b..38c37a42ede79 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -28,12 +28,12 @@ use sp_core::{ storage::{ChildInfo, TrackedStorageKey}, }; use sp_runtime::{ - traits::{Block as BlockT, HashFor}, + traits::{Block as BlockT, HashingFor}, StateVersion, Storage, }; use sp_state_machine::{ - backend::Backend as StateBackend, ChildStorageCollection, DBValue, IterArgs, StorageCollection, - StorageIterator, StorageKey, StorageValue, 
+ backend::Backend as StateBackend, BackendTransaction, ChildStorageCollection, DBValue, + IterArgs, StorageCollection, StorageIterator, StorageKey, StorageValue, }; use sp_trie::{ cache::{CacheSize, SharedTrieCache}, @@ -52,9 +52,9 @@ struct StorageDb { _block: std::marker::PhantomData, } -impl sp_state_machine::Storage> for StorageDb { +impl sp_state_machine::Storage> for StorageDb { fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - let prefixed_key = prefixed_key::>(key, prefix); + let prefixed_key = prefixed_key::>(key, prefix); self.db .get(0, &prefixed_key) .map_err(|e| format!("Database backend error: {:?}", e)) @@ -84,19 +84,19 @@ pub struct BenchmarkingState { record: Cell>>, key_tracker: Arc>, whitelist: RefCell>, - proof_recorder: Option>>, + proof_recorder: Option>>, proof_recorder_root: Cell, - shared_trie_cache: SharedTrieCache>, + shared_trie_cache: SharedTrieCache>, } /// A raw iterator over the `BenchmarkingState`. pub struct RawIter { - inner: as StateBackend>>::RawIter, + inner: as StateBackend>>::RawIter, child_trie: Option>, key_tracker: Arc>, } -impl StorageIterator> for RawIter { +impl StorageIterator> for RawIter { type Backend = BenchmarkingState; type Error = String; @@ -138,8 +138,8 @@ impl BenchmarkingState { ) -> Result { let state_version = sp_runtime::StateVersion::default(); let mut root = B::Hash::default(); - let mut mdb = MemoryDB::>::default(); - sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); + let mut mdb = MemoryDB::>::default(); + sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); let mut state = BenchmarkingState { state: RefCell::new(None), @@ -341,10 +341,9 @@ fn state_err() -> String { "State is not open".into() } -impl StateBackend> for BenchmarkingState { - type Error = as StateBackend>>::Error; - type Transaction = as StateBackend>>::Transaction; - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; +impl StateBackend> for 
BenchmarkingState { + type Error = as StateBackend>>::Error; + type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; type RawIter = RawIter; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -423,10 +422,7 @@ impl StateBackend> for BenchmarkingState { &self, delta: impl Iterator)>, state_version: StateVersion, - ) -> (B::Hash, Self::Transaction) - where - B::Hash: Ord, - { + ) -> (B::Hash, BackendTransaction>) { self.state .borrow() .as_ref() @@ -438,10 +434,7 @@ impl StateBackend> for BenchmarkingState { child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (B::Hash, bool, Self::Transaction) - where - B::Hash: Ord, - { + ) -> (B::Hash, bool, BackendTransaction>) { self.state .borrow() .as_ref() @@ -465,8 +458,8 @@ impl StateBackend> for BenchmarkingState { fn commit( &self, - storage_root: as Hasher>::Out, - mut transaction: Self::Transaction, + storage_root: as Hasher>::Out, + mut transaction: BackendTransaction>, main_storage_changes: StorageCollection, child_storage_changes: ChildStorageCollection, ) -> Result<(), Self::Error> { @@ -620,7 +613,8 @@ impl StateBackend> for BenchmarkingState { log::debug!(target: "benchmark", "Some proof size: {}", &proof_size); proof_size } else { - if let Some(size) = proof.encoded_compact_size::>(proof_recorder_root) { + if let Some(size) = proof.encoded_compact_size::>(proof_recorder_root) + { size as u32 } else if proof_recorder_root == self.root.get() { log::debug!(target: "benchmark", "No changes - no proof"); diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index c62b9ce0e7a61..73fb4f8ce6db3 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -79,16 +79,16 @@ use sp_database::Transaction; use sp_runtime::{ generic::BlockId, traits::{ - Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, + Block as BlockT, Hash, HashingFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, }, Justification, Justifications, 
StateVersion, Storage, }; use sp_state_machine::{ backend::{AsTrieBackend, Backend as StateBackend}, - ChildStorageCollection, DBValue, IndexOperation, IterArgs, OffchainChangesCollection, - StateMachineStats, StorageCollection, StorageIterator, StorageKey, StorageValue, - UsageInfo as StateUsageInfo, + BackendTransaction, ChildStorageCollection, DBValue, IndexOperation, IterArgs, + OffchainChangesCollection, StateMachineStats, StorageCollection, StorageIterator, StorageKey, + StorageValue, UsageInfo as StateUsageInfo, }; use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, PrefixedMemoryDB}; @@ -102,12 +102,12 @@ const CACHE_HEADERS: usize = 8; /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. pub type DbState = - sp_state_machine::TrieBackend>>, HashFor>; + sp_state_machine::TrieBackend>>, HashingFor>; /// Builder for [`DbState`]. pub type DbStateBuilder = sp_state_machine::TrieBackendBuilder< - Arc>>, - HashFor, + Arc>>, + HashingFor, >; /// Length of a [`DbHash`]. @@ -162,12 +162,12 @@ impl std::fmt::Debug for RefTrackingState { /// A raw iterator over the `RefTrackingState`. 
pub struct RawIter { - inner: as StateBackend>>::RawIter, + inner: as StateBackend>>::RawIter, } -impl StorageIterator> for RawIter { +impl StorageIterator> for RawIter { type Backend = RefTrackingState; - type Error = as StateBackend>>::Error; + type Error = as StateBackend>>::Error; fn next_key(&mut self, backend: &Self::Backend) -> Option> { self.inner.next_key(&backend.state) @@ -185,10 +185,9 @@ impl StorageIterator> for RawIter { } } -impl StateBackend> for RefTrackingState { - type Error = as StateBackend>>::Error; - type Transaction = as StateBackend>>::Transaction; - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; +impl StateBackend> for RefTrackingState { + type Error = as StateBackend>>::Error; + type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; type RawIter = RawIter; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -243,10 +242,7 @@ impl StateBackend> for RefTrackingState { &self, delta: impl Iterator)>, state_version: StateVersion, - ) -> (B::Hash, Self::Transaction) - where - B::Hash: Ord, - { + ) -> (B::Hash, BackendTransaction>) { self.state.storage_root(delta, state_version) } @@ -255,10 +251,7 @@ impl StateBackend> for RefTrackingState { child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (B::Hash, bool, Self::Transaction) - where - B::Hash: Ord, - { + ) -> (B::Hash, bool, BackendTransaction>) { self.state.child_storage_root(child_info, delta, state_version) } @@ -275,12 +268,12 @@ impl StateBackend> for RefTrackingState { } } -impl AsTrieBackend> for RefTrackingState { - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; +impl AsTrieBackend> for RefTrackingState { + type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; fn as_trie_backend( &self, - ) -> &sp_state_machine::TrieBackend> { + ) -> &sp_state_machine::TrieBackend> { &self.state.as_trie_backend() } } @@ -824,7 +817,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub 
struct BlockImportOperation { old_state: RecordStatsState, Block>, - db_updates: PrefixedMemoryDB>, + db_updates: PrefixedMemoryDB>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, offchain_storage_updates: OffchainChangesCollection, @@ -913,7 +906,10 @@ impl sc_client_api::backend::BlockImportOperation Ok(()) } - fn update_db_storage(&mut self, update: PrefixedMemoryDB>) -> ClientResult<()> { + fn update_db_storage( + &mut self, + update: PrefixedMemoryDB>, + ) -> ClientResult<()> { self.db_updates = update; Ok(()) } @@ -992,10 +988,10 @@ struct StorageDb { prefix_keys: bool, } -impl sp_state_machine::Storage> for StorageDb { +impl sp_state_machine::Storage> for StorageDb { fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { if self.prefix_keys { - let key = prefixed_key::>(key, prefix); + let key = prefixed_key::>(key, prefix); self.state_db.get(&key, self) } else { self.state_db.get(key.as_ref(), self) @@ -1015,16 +1011,16 @@ impl sc_state_db::NodeDb for StorageDb { struct DbGenesisStorage { root: Block::Hash, - storage: PrefixedMemoryDB>, + storage: PrefixedMemoryDB>, } impl DbGenesisStorage { - pub fn new(root: Block::Hash, storage: PrefixedMemoryDB>) -> Self { + pub fn new(root: Block::Hash, storage: PrefixedMemoryDB>) -> Self { DbGenesisStorage { root, storage } } } -impl sp_state_machine::Storage> for DbGenesisStorage { +impl sp_state_machine::Storage> for DbGenesisStorage { fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { use hash_db::HashDB; Ok(self.storage.get(key, prefix)) @@ -1036,14 +1032,15 @@ struct EmptyStorage(pub Block::Hash); impl EmptyStorage { pub fn new() -> Self { let mut root = Block::Hash::default(); - let mut mdb = MemoryDB::>::default(); + let mut mdb = MemoryDB::>::default(); // both triedbmut are the same on empty storage. 
- sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); + sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb, &mut root) + .build(); EmptyStorage(root) } } -impl sp_state_machine::Storage> for EmptyStorage { +impl sp_state_machine::Storage> for EmptyStorage { fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { Ok(None) } @@ -1104,7 +1101,7 @@ pub struct Backend { io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, state_usage: Arc, genesis_state: RwLock>>>, - shared_trie_cache: Option>>, + shared_trie_cache: Option>>, } impl Backend { @@ -1130,6 +1127,13 @@ impl Backend { Self::from_database(db as Arc<_>, canonicalization_delay, &db_config, needs_init) } + /// Reset the shared trie cache. + pub fn reset_trie_cache(&self) { + if let Some(cache) = &self.shared_trie_cache { + cache.reset(); + } + } + /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self { @@ -1172,7 +1176,7 @@ impl Backend { /// /// Should only be needed for benchmarking. 
#[cfg(any(feature = "runtime-benchmarks"))] - pub fn expose_storage(&self) -> Arc>> { + pub fn expose_storage(&self) -> Arc>> { self.storage.clone() } @@ -3541,8 +3545,8 @@ pub(crate) mod tests { let x0 = ExtrinsicWrapper::from(0u64).encode(); let x1 = ExtrinsicWrapper::from(1u64).encode(); - let x0_hash = as sp_core::Hasher>::hash(&x0[1..]); - let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); + let x0_hash = as sp_core::Hasher>::hash(&x0[1..]); + let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); let index = vec![ IndexOperation::Insert { extrinsic: 0, @@ -3585,8 +3589,8 @@ pub(crate) mod tests { let x0 = ExtrinsicWrapper::from(0u64).encode(); let x1 = ExtrinsicWrapper::from(1u64).encode(); - let x0_hash = as sp_core::Hasher>::hash(&x0[..]); - let x1_hash = as sp_core::Hasher>::hash(&x1[..]); + let x0_hash = as sp_core::Hasher>::hash(&x0[..]); + let x1_hash = as sp_core::Hasher>::hash(&x1[..]); let index = vec![ IndexOperation::Insert { extrinsic: 0, @@ -3620,7 +3624,7 @@ pub(crate) mod tests { let mut blocks = Vec::new(); let mut prev_hash = Default::default(); let x1 = ExtrinsicWrapper::from(0u64).encode(); - let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); + let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); for i in 0..10 { let mut index = Vec::new(); if i == 0 { @@ -3913,6 +3917,38 @@ pub(crate) mod tests { assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0); } + #[test] + fn revert_finalized_blocks() { + let pruning_modes = [BlocksPruning::Some(10), BlocksPruning::KeepAll]; + + // we will create a chain with 11 blocks, finalize block #8 and then + // attempt to revert 5 blocks. 
+ for pruning_mode in pruning_modes { + let backend = Backend::::new_test_with_tx_storage(pruning_mode, 1); + + let mut parent = Default::default(); + for i in 0..=10 { + parent = insert_block(&backend, i, parent, None, Default::default(), vec![], None) + .unwrap(); + } + + assert_eq!(backend.blockchain().info().best_number, 10); + + let block8 = backend.blockchain().hash(8).unwrap().unwrap(); + backend.finalize_block(block8, None).unwrap(); + backend.revert(5, true).unwrap(); + + match pruning_mode { + // we can only revert to blocks for which we have state, if pruning is enabled + // then the last state available will be that of the latest finalized block + BlocksPruning::Some(_) => + assert_eq!(backend.blockchain().info().finalized_number, 8), + // otherwise if we're not doing state pruning we can revert past finalized blocks + _ => assert_eq!(backend.blockchain().info().finalized_number, 5), + } + } + } + #[test] fn test_no_duplicated_leaves_allowed() { let backend: Backend = Backend::new_test(10, 10); diff --git a/client/db/src/parity_db.rs b/client/db/src/parity_db.rs index 01562081a8f4f..b7068f2430ef7 100644 --- a/client/db/src/parity_db.rs +++ b/client/db/src/parity_db.rs @@ -54,16 +54,16 @@ pub fn open>( ]; for i in compressed { - let mut column = &mut config.columns[i as usize]; + let column = &mut config.columns[i as usize]; column.compression = parity_db::CompressionType::Lz4; } - let mut state_col = &mut config.columns[columns::STATE as usize]; + let state_col = &mut config.columns[columns::STATE as usize]; state_col.ref_counted = true; state_col.preimage = true; state_col.uniform = true; - let mut tx_col = &mut config.columns[columns::TRANSACTION as usize]; + let tx_col = &mut config.columns[columns::TRANSACTION as usize]; tx_col.ref_counted = true; tx_col.preimage = true; tx_col.uniform = true; diff --git a/client/db/src/pinned_blocks_cache.rs b/client/db/src/pinned_blocks_cache.rs index 7b346b4631eee..46c9287fb19ac 100644 --- 
a/client/db/src/pinned_blocks_cache.rs +++ b/client/db/src/pinned_blocks_cache.rs @@ -168,7 +168,7 @@ impl PinnedBlocksCache { /// Attach body to an existing cache item pub fn insert_body(&mut self, hash: Block::Hash, extrinsics: Option>) { match self.cache.peek_mut(&hash) { - Some(mut entry) => { + Some(entry) => { entry.body = Some(extrinsics); log::trace!( target: LOG_TARGET, @@ -192,7 +192,7 @@ impl PinnedBlocksCache { justifications: Option, ) { match self.cache.peek_mut(&hash) { - Some(mut entry) => { + Some(entry) => { entry.justifications = Some(justifications); log::trace!( target: LOG_TARGET, diff --git a/client/db/src/record_stats_state.rs b/client/db/src/record_stats_state.rs index 2776802dde04e..29ece84f97e57 100644 --- a/client/db/src/record_stats_state.rs +++ b/client/db/src/record_stats_state.rs @@ -21,12 +21,12 @@ use crate::stats::StateUsageStats; use sp_core::storage::ChildInfo; use sp_runtime::{ - traits::{Block as BlockT, HashFor}, + traits::{Block as BlockT, HashingFor}, StateVersion, }; use sp_state_machine::{ backend::{AsTrieBackend, Backend as StateBackend}, - IterArgs, StorageIterator, StorageKey, StorageValue, TrieBackend, + BackendTransaction, IterArgs, StorageIterator, StorageKey, StorageValue, TrieBackend, }; use std::sync::Arc; @@ -56,7 +56,7 @@ impl Drop for RecordStatsState { } } -impl>, B: BlockT> RecordStatsState { +impl>, B: BlockT> RecordStatsState { /// Create a new instance wrapping generic State and shared cache. 
pub(crate) fn new( state: S, @@ -75,15 +75,15 @@ impl>, B: BlockT> RecordStatsState { pub struct RawIter where - S: StateBackend>, + S: StateBackend>, B: BlockT, { - inner: >>::RawIter, + inner: >>::RawIter, } -impl StorageIterator> for RawIter +impl StorageIterator> for RawIter where - S: StateBackend>, + S: StateBackend>, B: BlockT, { type Backend = RecordStatsState; @@ -105,9 +105,10 @@ where } } -impl>, B: BlockT> StateBackend> for RecordStatsState { +impl>, B: BlockT> StateBackend> + for RecordStatsState +{ type Error = S::Error; - type Transaction = S::Transaction; type TrieBackendStorage = S::TrieBackendStorage; type RawIter = RawIter; @@ -171,10 +172,7 @@ impl>, B: BlockT> StateBackend> for Record &self, delta: impl Iterator)>, state_version: StateVersion, - ) -> (B::Hash, Self::Transaction) - where - B::Hash: Ord, - { + ) -> (B::Hash, BackendTransaction>) { self.state.storage_root(delta, state_version) } @@ -183,10 +181,7 @@ impl>, B: BlockT> StateBackend> for Record child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (B::Hash, bool, Self::Transaction) - where - B::Hash: Ord, - { + ) -> (B::Hash, bool, BackendTransaction>) { self.state.child_storage_root(child_info, delta, state_version) } @@ -205,12 +200,12 @@ impl>, B: BlockT> StateBackend> for Record } } -impl> + AsTrieBackend>, B: BlockT> AsTrieBackend> - for RecordStatsState +impl> + AsTrieBackend>, B: BlockT> + AsTrieBackend> for RecordStatsState { - type TrieBackendStorage = >>::TrieBackendStorage; + type TrieBackendStorage = >>::TrieBackendStorage; - fn as_trie_backend(&self) -> &TrieBackend> { + fn as_trie_backend(&self) -> &TrieBackend> { self.state.as_trie_backend() } } diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 21a9bd70dde65..6a056ce7dfa34 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -14,36 +14,34 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -lru = "0.8.1" parking_lot 
= "0.12.1" +schnellru = "0.2.1" tracing = "0.1.29" -wasmi = "0.13.2" -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } sc-executor-common = { version = "0.10.0-dev", path = "common" } -sc-executor-wasmi = { version = "0.10.0-dev", path = "wasmi" } sc-executor-wasmtime = { version = "0.10.0-dev", path = "wasmtime" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-externalities = { version = "0.13.0", path = "../../primitives/externalities" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } -sp-panic-handler = { version = "5.0.0", path = "../../primitives/panic-handler" } -sp-runtime-interface = { version = "7.0.0", path = "../../primitives/runtime-interface" } -sp-trie = { version = "7.0.0", path = "../../primitives/trie" } -sp-version = { version = "5.0.0", path = "../../primitives/version" } -sp-wasm-interface = { version = "7.0.0", path = "../../primitives/wasm-interface" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-externalities = { version = "0.19.0", path = "../../primitives/externalities" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } +sp-panic-handler = { version = "8.0.0", path = "../../primitives/panic-handler" } +sp-runtime-interface = { version = "17.0.0", path = "../../primitives/runtime-interface" } +sp-trie = { version = "22.0.0", path = "../../primitives/trie" } +sp-version = { version = "22.0.0", path = "../../primitives/version" } +sp-wasm-interface = { version = "14.0.0", path = "../../primitives/wasm-interface" } [dev-dependencies] -array-bytes = "4.1" +array-bytes = "6.1" assert_matches = "1.3.0" wat = "1.0" sc-runtime-test = { version = "2.0.0", path = "runtime-test" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } -sp-state-machine = { version = "0.13.0", path = "../../primitives/state-machine" 
} -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-state-machine = { version = "0.28.0", path = "../../primitives/state-machine" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../primitives/maybe-compressed-blob" } sc-tracing = { version = "4.0.0-dev", path = "../tracing" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } tracing-subscriber = "0.2.19" paste = "1.0" regex = "1.6.0" @@ -59,5 +57,19 @@ harness = false [features] default = ["std"] # This crate does not have `no_std` support, we just require this for tests -std = [] +std = [ + "sc-runtime-test/std", + "sp-api/std", + "sp-core/std", + "sp-externalities/std", + "sp-io/std", + "sp-runtime/std", + "sp-runtime-interface/std", + "sp-state-machine/std", + "sp-tracing/std", + "sp-trie/std", + "sp-version/std", + "sp-wasm-interface/std", + "substrate-test-runtime/std" +] wasm-extern-trace = [] diff --git a/client/executor/benches/bench.rs b/client/executor/benches/bench.rs index 772898b8c76b3..66a82a175221d 100644 --- a/client/executor/benches/bench.rs +++ b/client/executor/benches/bench.rs @@ -25,7 +25,6 @@ use sc_executor_common::{ }; use sc_executor_wasmtime::InstantiationStrategy; use sc_runtime_test::wasm_binary_unwrap as test_runtime; -use sp_wasm_interface::HostFunctions as _; use std::sync::{ atomic::{AtomicBool, AtomicUsize, Ordering}, Arc, @@ -33,7 +32,6 @@ use std::sync::{ #[derive(Clone)] enum Method { - Interpreted, Compiled { instantiation_strategy: InstantiationStrategy, precompile: bool }, } @@ -50,17 +48,10 @@ fn initialize( method: Method, ) -> Box { let blob = RuntimeBlob::uncompress_if_needed(runtime).unwrap(); - let host_functions = sp_io::SubstrateHostFunctions::host_functions(); + let allow_missing_func_imports = true; match method { - Method::Interpreted => 
sc_executor_wasmi::create_runtime( - blob, - DEFAULT_HEAP_ALLOC_STRATEGY, - host_functions, - allow_missing_func_imports, - ) - .map(|runtime| -> Box { Box::new(runtime) }), Method::Compiled { instantiation_strategy, precompile } => { let config = sc_executor_wasmtime::Config { allow_missing_func_imports, @@ -188,12 +179,19 @@ fn bench_call_instance(c: &mut Criterion) { }, ), ( - "pooling_vanilla", + "pooling_vanilla_fresh", Method::Compiled { instantiation_strategy: InstantiationStrategy::Pooling, precompile: false, }, ), + ( + "pooling_vanilla_precompiled", + Method::Compiled { + instantiation_strategy: InstantiationStrategy::Pooling, + precompile: true, + }, + ), ( "pooling_cow_fresh", Method::Compiled { @@ -208,7 +206,6 @@ fn bench_call_instance(c: &mut Criterion) { precompile: true, }, ), - ("interpreted", Method::Interpreted), ]; let runtimes = [("kusama_runtime", kusama_runtime()), ("test_runtime", test_runtime())]; diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index dd74ea2cfd209..dfde1902631cd 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -16,10 +16,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] thiserror = "1.0.30" wasm-instrument = "0.3" -wasmi = "0.13.2" sc-allocator = { version = "4.1.0-dev", path = "../../allocator" } sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../../primitives/maybe-compressed-blob" } -sp-wasm-interface = { version = "7.0.0", path = "../../../primitives/wasm-interface" } +sp-wasm-interface = { version = "14.0.0", path = "../../../primitives/wasm-interface" } [features] default = [] diff --git a/client/executor/common/src/error.rs b/client/executor/common/src/error.rs index 2dfe0bf02df2f..2a0dc364b4103 100644 --- a/client/executor/common/src/error.rs +++ b/client/executor/common/src/error.rs @@ -18,8 +18,6 @@ //! Rust executor possible errors. -use wasmi; - /// Result type alias. 
pub type Result = std::result::Result; @@ -27,9 +25,6 @@ pub type Result = std::result::Result; #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum Error { - #[error(transparent)] - Wasmi(#[from] wasmi::Error), - #[error("Error calling api function: {0}")] ApiError(Box), @@ -48,9 +43,6 @@ pub enum Error { #[error("Invalid type returned (should be u64)")] InvalidReturn, - #[error("Runtime error")] - Runtime, - #[error("Runtime panicked: {0}")] RuntimePanicked(String), @@ -109,8 +101,6 @@ pub enum Error { OutputExceedsBounds, } -impl wasmi::HostError for Error {} - impl From<&'static str> for Error { fn from(err: &'static str) -> Error { Error::Other(err.into()) diff --git a/client/executor/common/src/runtime_blob/mod.rs b/client/executor/common/src/runtime_blob/mod.rs index 58278493af51b..07a0945cc2b66 100644 --- a/client/executor/common/src/runtime_blob/mod.rs +++ b/client/executor/common/src/runtime_blob/mod.rs @@ -26,9 +26,6 @@ //! //! To give you some examples: //! -//! - wasmi allows reaching to non-exported mutable globals so that we could reset them. Wasmtime -//! doesn’t support that. -//! //! We need to reset the globals because when we //! execute the Substrate Runtime, we do not drop and create the instance anew, instead //! we restore some selected parts of the state. 
diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index 07626c2343361..abf2fb5c27ad5 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -13,11 +13,11 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, features = ["improved_panic_error_reporting"], path = "../../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime-interface" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, features = ["improved_panic_error_reporting"], path = "../../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-runtime-interface = { version = "17.0.0", default-features = false, path = "../../../primitives/runtime-interface" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } [build-dependencies] substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder", optional = true } @@ -30,4 +30,5 @@ std = [ "sp-runtime/std", "sp-std/std", "substrate-wasm-builder", + "sp-runtime-interface/std" ] diff --git a/client/executor/runtime-test/build.rs b/client/executor/runtime-test/build.rs index 745742123a8f6..088c93110d855 100644 --- a/client/executor/runtime-test/build.rs +++ b/client/executor/runtime-test/build.rs @@ -24,6 +24,7 @@ fn main() { .with_current_project() .export_heap_base() 
.import_memory() + .disable_runtime_version_section_check() .build(); } @@ -36,6 +37,7 @@ fn main() { .import_memory() .set_file_name("wasm_binary_with_tracing.rs") .append_to_rust_flags(r#"--cfg feature="with-tracing""#) + .disable_runtime_version_section_check() .build(); } } diff --git a/client/executor/src/executor.rs b/client/executor/src/executor.rs index a3717f4d29002..7c292a83da089 100644 --- a/client/executor/src/executor.rs +++ b/client/executor/src/executor.rs @@ -88,6 +88,7 @@ pub struct WasmExecutorBuilder { method: WasmExecutionMethod, onchain_heap_alloc_strategy: Option, offchain_heap_alloc_strategy: Option, + ignore_onchain_heap_pages: bool, max_runtime_instances: usize, cache_path: Option, allow_missing_host_functions: bool, @@ -104,6 +105,7 @@ impl WasmExecutorBuilder { method: WasmExecutionMethod::default(), onchain_heap_alloc_strategy: None, offchain_heap_alloc_strategy: None, + ignore_onchain_heap_pages: false, max_runtime_instances: 2, runtime_cache_size: 4, allow_missing_host_functions: false, @@ -137,6 +139,14 @@ impl WasmExecutorBuilder { self } + /// Create the wasm executor and follow/ignore onchain heap pages value. + /// + /// By default this the onchain heap pages value is followed. + pub fn with_ignore_onchain_heap_pages(mut self, ignore_onchain_heap_pages: bool) -> Self { + self.ignore_onchain_heap_pages = ignore_onchain_heap_pages; + self + } + /// Create the wasm executor with the given maximum number of `instances`. 
/// /// The number of `instances` defines how many different instances of a runtime the cache is @@ -193,6 +203,7 @@ impl WasmExecutorBuilder { default_onchain_heap_alloc_strategy: unwrap_heap_pages( self.onchain_heap_alloc_strategy, ), + ignore_onchain_heap_pages: self.ignore_onchain_heap_pages, cache: Arc::new(RuntimeCache::new( self.max_runtime_instances, self.cache_path.clone(), @@ -214,6 +225,8 @@ pub struct WasmExecutor { default_onchain_heap_alloc_strategy: HeapAllocStrategy, /// The heap allocation strategy for offchain Wasm calls. default_offchain_heap_alloc_strategy: HeapAllocStrategy, + /// Ignore onchain heap pages value. + ignore_onchain_heap_pages: bool, /// WASM runtime cache. cache: Arc, /// The path to a directory which the executor can leverage for a file cache, e.g. put there @@ -230,6 +243,7 @@ impl Clone for WasmExecutor { method: self.method, default_onchain_heap_alloc_strategy: self.default_onchain_heap_alloc_strategy, default_offchain_heap_alloc_strategy: self.default_offchain_heap_alloc_strategy, + ignore_onchain_heap_pages: self.ignore_onchain_heap_pages, cache: self.cache.clone(), cache_path: self.cache_path.clone(), allow_missing_host_functions: self.allow_missing_host_functions, @@ -276,6 +290,7 @@ where default_offchain_heap_alloc_strategy: unwrap_heap_pages( default_heap_pages.map(|h| HeapAllocStrategy::Static { extra_pages: h as _ }), ), + ignore_onchain_heap_pages: false, cache: Arc::new(RuntimeCache::new( max_runtime_instances, cache_path.clone(), @@ -486,10 +501,14 @@ where "Executing function", ); - let on_chain_heap_alloc_strategy = runtime_code - .heap_pages - .map(|h| HeapAllocStrategy::Static { extra_pages: h as _ }) - .unwrap_or_else(|| self.default_onchain_heap_alloc_strategy); + let on_chain_heap_alloc_strategy = if self.ignore_onchain_heap_pages { + self.default_onchain_heap_alloc_strategy + } else { + runtime_code + .heap_pages + .map(|h| HeapAllocStrategy::Static { extra_pages: h as _ }) + .unwrap_or_else(|| 
self.default_onchain_heap_alloc_strategy) + }; let heap_alloc_strategy = match context { CallContext::Offchain => self.default_offchain_heap_alloc_strategy, @@ -518,10 +537,14 @@ where ext: &mut dyn Externalities, runtime_code: &RuntimeCode, ) -> Result { - let on_chain_heap_pages = runtime_code - .heap_pages - .map(|h| HeapAllocStrategy::Static { extra_pages: h as _ }) - .unwrap_or_else(|| self.default_onchain_heap_alloc_strategy); + let on_chain_heap_pages = if self.ignore_onchain_heap_pages { + self.default_onchain_heap_alloc_strategy + } else { + runtime_code + .heap_pages + .map(|h| HeapAllocStrategy::Static { extra_pages: h as _ }) + .unwrap_or_else(|| self.default_onchain_heap_alloc_strategy) + }; self.with_instance( runtime_code, @@ -631,10 +654,14 @@ impl CodeExecutor for NativeElseWasmExecut "Executing function", ); - let on_chain_heap_alloc_strategy = runtime_code - .heap_pages - .map(|h| HeapAllocStrategy::Static { extra_pages: h as _ }) - .unwrap_or_else(|| self.wasm.default_onchain_heap_alloc_strategy); + let on_chain_heap_alloc_strategy = if self.wasm.ignore_onchain_heap_pages { + self.wasm.default_onchain_heap_alloc_strategy + } else { + runtime_code + .heap_pages + .map(|h| HeapAllocStrategy::Static { extra_pages: h as _ }) + .unwrap_or_else(|| self.wasm.default_onchain_heap_alloc_strategy) + }; let heap_alloc_strategy = match context { CallContext::Offchain => self.wasm.default_offchain_heap_alloc_strategy, diff --git a/client/executor/src/integration_tests/linux.rs b/client/executor/src/integration_tests/linux.rs index 434cb69bfdd32..68ac37e9011a1 100644 --- a/client/executor/src/integration_tests/linux.rs +++ b/client/executor/src/integration_tests/linux.rs @@ -18,34 +18,15 @@ //! Tests that are only relevant for Linux. 
+mod smaps; + use super::mk_test_runtime; use crate::WasmExecutionMethod; use codec::Encode as _; use sc_executor_common::wasm_runtime::DEFAULT_HEAP_ALLOC_STRATEGY; -mod smaps; - use self::smaps::Smaps; -#[test] -fn memory_consumption_interpreted() { - let _ = sp_tracing::try_init_simple(); - - if std::env::var("RUN_TEST").is_ok() { - memory_consumption(WasmExecutionMethod::Interpreted); - } else { - // We need to run the test in isolation, to not getting interfered by the other tests. - let executable = std::env::current_exe().unwrap(); - let output = std::process::Command::new(executable) - .env("RUN_TEST", "1") - .args(&["--nocapture", "memory_consumption_interpreted"]) - .output() - .unwrap(); - - assert!(output.status.success()); - } -} - #[test] fn memory_consumption_compiled() { let _ = sp_tracing::try_init_simple(); diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index b65aeb8d01109..37aed8eef96a1 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -24,7 +24,7 @@ use codec::{Decode, Encode}; use sc_executor_common::{ error::Error, runtime_blob::RuntimeBlob, - wasm_runtime::{HeapAllocStrategy, WasmModule, DEFAULT_HEAP_ALLOC_STRATEGY}, + wasm_runtime::{HeapAllocStrategy, WasmModule}, }; use sc_runtime_test::wasm_binary_unwrap; use sp_core::{ @@ -50,12 +50,6 @@ type HostFunctions = sp_io::SubstrateHostFunctions; macro_rules! test_wasm_execution { ($method_name:ident) => { paste::item! { - #[test] - fn [<$method_name _interpreted>]() { - let _ = sp_tracing::try_init_simple(); - $method_name(WasmExecutionMethod::Interpreted); - } - #[test] fn [<$method_name _compiled_recreate_instance_cow>]() { let _ = sp_tracing::try_init_simple(); @@ -97,15 +91,6 @@ macro_rules! test_wasm_execution { } } }; - - (interpreted_only $method_name:ident) => { - paste::item! 
{ - #[test] - fn [<$method_name _interpreted>]() { - $method_name(WasmExecutionMethod::Interpreted); - } - } - }; } fn call_in_wasm( @@ -144,8 +129,8 @@ fn call_not_existing_function(wasm_method: WasmExecutionMethod) { match call_in_wasm("test_calling_missing_external", &[], wasm_method, &mut ext).unwrap_err() { Error::AbortedDueToTrap(error) => { let expected = match wasm_method { - WasmExecutionMethod::Interpreted => "Other: Function `missing_external` is only a stub. Calling a stub is not allowed.", - WasmExecutionMethod::Compiled { .. } => "call to a missing function env:missing_external" + WasmExecutionMethod::Compiled { .. } => + "call to a missing function env:missing_external", }; assert_eq!(error.message, expected); }, @@ -163,8 +148,8 @@ fn call_yet_another_not_existing_function(wasm_method: WasmExecutionMethod) { { Error::AbortedDueToTrap(error) => { let expected = match wasm_method { - WasmExecutionMethod::Interpreted => "Other: Function `yet_another_missing_external` is only a stub. Calling a stub is not allowed.", - WasmExecutionMethod::Compiled { .. } => "call to a missing function env:yet_another_missing_external" + WasmExecutionMethod::Compiled { .. 
} => + "call to a missing function env:yet_another_missing_external", }; assert_eq!(error.message, expected); }, @@ -473,9 +458,6 @@ fn should_trap_when_heap_exhausted(wasm_method: WasmExecutionMethod) { r#"host code panicked while being called by the runtime: Failed to allocate memory: "Allocator ran out of space""# ); }, - Error::RuntimePanicked(error) if wasm_method == WasmExecutionMethod::Interpreted => { - assert_eq!(error, r#"Failed to allocate memory: "Allocator ran out of space""#); - }, error => panic!("unexpected error: {:?}", error), } } @@ -558,25 +540,6 @@ fn restoration_of_globals(wasm_method: WasmExecutionMethod) { assert!(res.is_ok()); } -test_wasm_execution!(interpreted_only heap_is_reset_between_calls); -fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) { - let runtime = mk_test_runtime(wasm_method, DEFAULT_HEAP_ALLOC_STRATEGY); - let mut instance = runtime.new_instance().unwrap(); - - let heap_base = instance - .get_global_const("__heap_base") - .expect("`__heap_base` is valid") - .expect("`__heap_base` exists") - .as_i32() - .expect("`__heap_base` is an `i32`"); - - let params = (heap_base as u32, 512u32 * 64 * 1024).encode(); - instance.call_export("check_and_set_in_heap", ¶ms).unwrap(); - - // Cal it a second time to check that the heap was freed. - instance.call_export("check_and_set_in_heap", ¶ms).unwrap(); -} - test_wasm_execution!(parallel_execution); fn parallel_execution(wasm_method: WasmExecutionMethod) { let executor = Arc::new( @@ -787,7 +750,6 @@ fn unreachable_intrinsic(wasm_method: WasmExecutionMethod) { match call_in_wasm("test_unreachable_intrinsic", &[], wasm_method, &mut ext).unwrap_err() { Error::AbortedDueToTrap(error) => { let expected = match wasm_method { - WasmExecutionMethod::Interpreted => "unreachable", WasmExecutionMethod::Compiled { .. 
} => "wasm trap: wasm `unreachable` instruction executed", }; @@ -814,9 +776,6 @@ fn return_huge_len(wasm_method: WasmExecutionMethod) { let mut ext = ext.ext(); match call_in_wasm("test_return_huge_len", &[], wasm_method, &mut ext).unwrap_err() { - Error::Runtime => { - assert_matches!(wasm_method, WasmExecutionMethod::Interpreted); - }, Error::OutputExceedsBounds => { assert_matches!(wasm_method, WasmExecutionMethod::Compiled { .. }); }, @@ -843,9 +802,6 @@ fn return_max_memory_offset_plus_one(wasm_method: WasmExecutionMethod) { match call_in_wasm("test_return_max_memory_offset_plus_one", &[], wasm_method, &mut ext) .unwrap_err() { - Error::Runtime => { - assert_matches!(wasm_method, WasmExecutionMethod::Interpreted); - }, Error::OutputExceedsBounds => { assert_matches!(wasm_method, WasmExecutionMethod::Compiled { .. }); }, @@ -859,9 +815,6 @@ fn return_overflow(wasm_method: WasmExecutionMethod) { let mut ext = ext.ext(); match call_in_wasm("test_return_overflow", &[], wasm_method, &mut ext).unwrap_err() { - Error::Runtime => { - assert_matches!(wasm_method, WasmExecutionMethod::Interpreted); - }, Error::OutputExceedsBounds => { assert_matches!(wasm_method, WasmExecutionMethod::Compiled { .. }); }, diff --git a/client/executor/src/lib.rs b/client/executor/src/lib.rs index 42e7dc7d16bd8..6ee0ab3512ac0 100644 --- a/client/executor/src/lib.rs +++ b/client/executor/src/lib.rs @@ -49,7 +49,6 @@ pub use sp_core::traits::Externalities; pub use sp_version::{NativeVersion, RuntimeVersion}; #[doc(hidden)] pub use sp_wasm_interface; -pub use wasmi; pub use sc_executor_common::{ error, diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index 41a095081f8d1..6dec3abdb20cf 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -22,29 +22,27 @@ //! components of the runtime that are expensive to initialize. 
use crate::error::{Error, WasmError}; + use codec::Decode; -use lru::LruCache; use parking_lot::Mutex; use sc_executor_common::{ runtime_blob::RuntimeBlob, wasm_runtime::{HeapAllocStrategy, WasmInstance, WasmModule}, }; +use schnellru::{ByLength, LruMap}; use sp_core::traits::{Externalities, FetchRuntimeCode, RuntimeCode}; use sp_version::RuntimeVersion; +use sp_wasm_interface::HostFunctions; + use std::{ - num::NonZeroUsize, panic::AssertUnwindSafe, path::{Path, PathBuf}, sync::Arc, }; -use sp_wasm_interface::HostFunctions; - /// Specification of different methods of executing the runtime Wasm code. #[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)] pub enum WasmExecutionMethod { - /// Uses the Wasmi interpreter. - Interpreted, /// Uses the Wasmtime compiled runtime. Compiled { /// The instantiation strategy to use. @@ -53,8 +51,10 @@ pub enum WasmExecutionMethod { } impl Default for WasmExecutionMethod { - fn default() -> WasmExecutionMethod { - WasmExecutionMethod::Interpreted + fn default() -> Self { + Self::Compiled { + instantiation_strategy: sc_executor_wasmtime::InstantiationStrategy::PoolingCopyOnWrite, + } } } @@ -163,7 +163,7 @@ pub struct RuntimeCache { /// A cache of runtimes along with metadata. /// /// Runtimes sorted by recent usage. The most recently used is at the front. - runtimes: Mutex>>, + runtimes: Mutex>>, /// The size of the instances cache for each runtime. max_runtime_instances: usize, cache_path: Option, @@ -185,9 +185,8 @@ impl RuntimeCache { cache_path: Option, runtime_cache_size: u8, ) -> RuntimeCache { - let cap = - NonZeroUsize::new(runtime_cache_size.max(1) as usize).expect("cache size is not zero"); - RuntimeCache { runtimes: Mutex::new(LruCache::new(cap)), max_runtime_instances, cache_path } + let cap = ByLength::new(runtime_cache_size.max(1) as u32); + RuntimeCache { runtimes: Mutex::new(LruMap::new(cap)), max_runtime_instances, cache_path } } /// Prepares a WASM module instance and executes given function for it. 
@@ -275,7 +274,7 @@ impl RuntimeCache { let versioned_runtime = Arc::new(result?); // Save new versioned wasm runtime in cache - runtimes.put(versioned_runtime_id, versioned_runtime.clone()); + runtimes.insert(versioned_runtime_id, versioned_runtime.clone()); versioned_runtime }; @@ -299,21 +298,6 @@ where H: HostFunctions, { match wasm_method { - WasmExecutionMethod::Interpreted => { - // Wasmi doesn't have any need in a cache directory. - // - // We drop the cache_path here to silence warnings that cache_path is not used if - // compiling without the `wasmtime` flag. - let _ = cache_path; - - sc_executor_wasmi::create_runtime( - blob, - heap_alloc_strategy, - H::host_functions(), - allow_missing_func_imports, - ) - .map(|runtime| -> Box { Box::new(runtime) }) - }, WasmExecutionMethod::Compiled { instantiation_strategy } => sc_executor_wasmtime::create_runtime::( blob, diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml deleted file mode 100644 index ded44f4ca77a9..0000000000000 --- a/client/executor/wasmi/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "sc-executor-wasmi" -version = "0.10.0-dev" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "This crate provides an implementation of `WasmRuntime` that is baked by wasmi." 
-documentation = "https://docs.rs/sc-executor-wasmi" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -log = "0.4.17" -wasmi = { version = "0.13.2", features = [ "virtual_memory" ] } -sc-allocator = { version = "4.1.0-dev", path = "../../allocator" } -sc-executor-common = { version = "0.10.0-dev", path = "../common" } -sp-runtime-interface = { version = "7.0.0", path = "../../../primitives/runtime-interface" } -sp-wasm-interface = { version = "7.0.0", path = "../../../primitives/wasm-interface" } diff --git a/client/executor/wasmi/README.md b/client/executor/wasmi/README.md deleted file mode 100644 index ad613aa1245e3..0000000000000 --- a/client/executor/wasmi/README.md +++ /dev/null @@ -1,3 +0,0 @@ -This crate provides an implementation of `WasmModule` that is baked by wasmi. - -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs deleted file mode 100644 index c757ff8afe43d..0000000000000 --- a/client/executor/wasmi/src/lib.rs +++ /dev/null @@ -1,599 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! 
This crate provides an implementation of `WasmModule` that is baked by wasmi. - -use std::{cell::RefCell, str, sync::Arc}; - -use log::{error, trace}; -use wasmi::{ - memory_units::Pages, - FuncInstance, ImportsBuilder, MemoryRef, Module, ModuleInstance, ModuleRef, - RuntimeValue::{self, I32, I64}, - TableRef, -}; - -use sc_allocator::{AllocationStats, FreeingBumpHeapAllocator}; -use sc_executor_common::{ - error::{Error, MessageWithBacktrace, WasmError}, - runtime_blob::{DataSegmentsSnapshot, RuntimeBlob}, - wasm_runtime::{HeapAllocStrategy, InvokeMethod, WasmInstance, WasmModule}, -}; -use sp_runtime_interface::unpack_ptr_and_len; -use sp_wasm_interface::{Function, FunctionContext, Pointer, Result as WResult, WordSize}; - -/// Wrapper around [`MemorRef`] that implements [`sc_allocator::Memory`]. -struct MemoryWrapper<'a>(&'a MemoryRef); - -impl sc_allocator::Memory for MemoryWrapper<'_> { - fn with_access_mut(&mut self, run: impl FnOnce(&mut [u8]) -> R) -> R { - self.0.with_direct_access_mut(run) - } - - fn with_access(&self, run: impl FnOnce(&[u8]) -> R) -> R { - self.0.with_direct_access(run) - } - - fn pages(&self) -> u32 { - self.0.current_size().0 as _ - } - - fn max_pages(&self) -> Option { - self.0.maximum().map(|p| p.0 as _) - } - - fn grow(&mut self, additional: u32) -> Result<(), ()> { - self.0 - .grow(Pages(additional as _)) - .map_err(|e| { - log::error!( - target: "wasm-executor", - "Failed to grow memory by {} pages: {}", - additional, - e, - ) - }) - .map(drop) - } -} - -struct FunctionExecutor { - heap: RefCell, - memory: MemoryRef, - host_functions: Arc>, - allow_missing_func_imports: bool, - missing_functions: Arc>, - panic_message: Option, -} - -impl FunctionExecutor { - fn new( - m: MemoryRef, - heap_base: u32, - host_functions: Arc>, - allow_missing_func_imports: bool, - missing_functions: Arc>, - ) -> Result { - Ok(FunctionExecutor { - heap: RefCell::new(FreeingBumpHeapAllocator::new(heap_base)), - memory: m, - host_functions, - 
allow_missing_func_imports, - missing_functions, - panic_message: None, - }) - } -} - -impl FunctionContext for FunctionExecutor { - fn read_memory_into(&self, address: Pointer, dest: &mut [u8]) -> WResult<()> { - self.memory.get_into(address.into(), dest).map_err(|e| e.to_string()) - } - - fn write_memory(&mut self, address: Pointer, data: &[u8]) -> WResult<()> { - self.memory.set(address.into(), data).map_err(|e| e.to_string()) - } - - fn allocate_memory(&mut self, size: WordSize) -> WResult> { - self.heap - .borrow_mut() - .allocate(&mut MemoryWrapper(&self.memory), size) - .map_err(|e| e.to_string()) - } - - fn deallocate_memory(&mut self, ptr: Pointer) -> WResult<()> { - self.heap - .borrow_mut() - .deallocate(&mut MemoryWrapper(&self.memory), ptr) - .map_err(|e| e.to_string()) - } - - fn register_panic_error_message(&mut self, message: &str) { - self.panic_message = Some(message.to_owned()); - } -} - -/// Will be used on initialization of a module to resolve function and memory imports. -struct Resolver<'a> { - /// All the hot functions that we export for the WASM blob. - host_functions: &'a [&'static dyn Function], - /// Should we allow missing function imports? - /// - /// If `true`, we return a stub that will return an error when being called. - allow_missing_func_imports: bool, - /// All the names of functions for that we did not provide a host function. 
- missing_functions: RefCell>, -} - -impl<'a> Resolver<'a> { - fn new( - host_functions: &'a [&'static dyn Function], - allow_missing_func_imports: bool, - ) -> Resolver<'a> { - Resolver { - host_functions, - allow_missing_func_imports, - missing_functions: RefCell::new(Vec::new()), - } - } -} - -impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { - fn resolve_func( - &self, - name: &str, - signature: &wasmi::Signature, - ) -> std::result::Result { - let signature = sp_wasm_interface::Signature::from(signature); - for (function_index, function) in self.host_functions.iter().enumerate() { - if name == function.name() { - if signature == function.signature() { - return Ok(wasmi::FuncInstance::alloc_host(signature.into(), function_index)) - } else { - return Err(wasmi::Error::Instantiation(format!( - "Invalid signature for function `{}` expected `{:?}`, got `{:?}`", - function.name(), - signature, - function.signature(), - ))) - } - } - } - - if self.allow_missing_func_imports { - trace!( - target: "wasm-executor", - "Could not find function `{}`, a stub will be provided instead.", - name, - ); - let id = self.missing_functions.borrow().len() + self.host_functions.len(); - self.missing_functions.borrow_mut().push(name.to_string()); - - Ok(wasmi::FuncInstance::alloc_host(signature.into(), id)) - } else { - Err(wasmi::Error::Instantiation(format!("Export {} not found", name))) - } - } - - fn resolve_memory( - &self, - _: &str, - _: &wasmi::MemoryDescriptor, - ) -> Result { - Err(wasmi::Error::Instantiation( - "Internal error, wasmi expects that the wasm blob exports memory.".into(), - )) - } -} - -impl wasmi::Externals for FunctionExecutor { - fn invoke_index( - &mut self, - index: usize, - args: wasmi::RuntimeArgs, - ) -> Result, wasmi::Trap> { - let mut args = args.as_ref().iter().copied().map(Into::into); - - if let Some(function) = self.host_functions.clone().get(index) { - function - .execute(self, &mut args) - .map_err(|msg| 
Error::FunctionExecution(function.name().to_string(), msg)) - .map_err(wasmi::Trap::from) - .map(|v| v.map(Into::into)) - } else if self.allow_missing_func_imports && - index >= self.host_functions.len() && - index < self.host_functions.len() + self.missing_functions.len() - { - Err(Error::from(format!( - "Function `{}` is only a stub. Calling a stub is not allowed.", - self.missing_functions[index - self.host_functions.len()], - )) - .into()) - } else { - Err(Error::from(format!("Could not find host function with index: {}", index)).into()) - } - } -} - -fn get_mem_instance(module: &ModuleRef) -> Result { - Ok(module - .export_by_name("memory") - .ok_or(Error::InvalidMemoryReference)? - .as_memory() - .ok_or(Error::InvalidMemoryReference)? - .clone()) -} - -/// Find the global named `__heap_base` in the given wasm module instance and -/// tries to get its value. -fn get_heap_base(module: &ModuleRef) -> Result { - let heap_base_val = module - .export_by_name("__heap_base") - .ok_or(Error::HeapBaseNotFoundOrInvalid)? - .as_global() - .ok_or(Error::HeapBaseNotFoundOrInvalid)? - .get(); - - match heap_base_val { - wasmi::RuntimeValue::I32(v) => Ok(v as u32), - _ => Err(Error::HeapBaseNotFoundOrInvalid), - } -} - -/// Call a given method in the given wasm-module runtime. -fn call_in_wasm_module( - module_instance: &ModuleRef, - memory: &MemoryRef, - method: InvokeMethod, - data: &[u8], - host_functions: Arc>, - allow_missing_func_imports: bool, - missing_functions: Arc>, - allocation_stats: &mut Option, -) -> Result, Error> { - // Initialize FunctionExecutor. 
- let table: Option = module_instance - .export_by_name("__indirect_function_table") - .and_then(|e| e.as_table().cloned()); - let heap_base = get_heap_base(module_instance)?; - - let mut function_executor = FunctionExecutor::new( - memory.clone(), - heap_base, - host_functions, - allow_missing_func_imports, - missing_functions, - )?; - - // Write the call data - let offset = function_executor.allocate_memory(data.len() as u32)?; - function_executor.write_memory(offset, data)?; - - fn convert_trap(executor: &mut FunctionExecutor, trap: wasmi::Trap) -> Error { - if let Some(message) = executor.panic_message.take() { - Error::AbortedDueToPanic(MessageWithBacktrace { message, backtrace: None }) - } else { - Error::AbortedDueToTrap(MessageWithBacktrace { - message: trap.to_string(), - backtrace: None, - }) - } - } - - let result = match method { - InvokeMethod::Export(method) => module_instance - .invoke_export( - method, - &[I32(u32::from(offset) as i32), I32(data.len() as i32)], - &mut function_executor, - ) - .map_err(|error| { - if let wasmi::Error::Trap(trap) = error { - convert_trap(&mut function_executor, trap) - } else { - error.into() - } - }), - InvokeMethod::Table(func_ref) => { - let func = table - .ok_or(Error::NoTable)? - .get(func_ref)? - .ok_or(Error::NoTableEntryWithIndex(func_ref))?; - FuncInstance::invoke( - &func, - &[I32(u32::from(offset) as i32), I32(data.len() as i32)], - &mut function_executor, - ) - .map_err(|trap| convert_trap(&mut function_executor, trap)) - }, - InvokeMethod::TableWithWrapper { dispatcher_ref, func } => { - let dispatcher = table - .ok_or(Error::NoTable)? - .get(dispatcher_ref)? 
- .ok_or(Error::NoTableEntryWithIndex(dispatcher_ref))?; - - FuncInstance::invoke( - &dispatcher, - &[I32(func as _), I32(u32::from(offset) as i32), I32(data.len() as i32)], - &mut function_executor, - ) - .map_err(|trap| convert_trap(&mut function_executor, trap)) - }, - }; - - *allocation_stats = Some(function_executor.heap.borrow().stats()); - - match result { - Ok(Some(I64(r))) => { - let (ptr, length) = unpack_ptr_and_len(r as u64); - #[allow(deprecated)] - memory.get(ptr, length as usize).map_err(|_| Error::Runtime) - }, - Err(e) => { - trace!( - target: "wasm-executor", - "Failed to execute code with {} pages", - memory.current_size().0, - ); - Err(e) - }, - _ => Err(Error::InvalidReturn), - } -} - -/// Prepare module instance -fn instantiate_module( - module: &Module, - host_functions: &[&'static dyn Function], - allow_missing_func_imports: bool, -) -> Result<(ModuleRef, Vec, MemoryRef), Error> { - let resolver = Resolver::new(host_functions, allow_missing_func_imports); - // start module instantiation. Don't run 'start' function yet. - let intermediate_instance = - ModuleInstance::new(module, &ImportsBuilder::new().with_resolver("env", &resolver))?; - - // Verify that the module has the heap base global variable. - let _ = get_heap_base(intermediate_instance.not_started_instance())?; - - // The `module` should export the memory with the correct properties (min, max). - // - // This is ensured by modifying the `RuntimeBlob` before initializing the `Module`. - let memory = get_mem_instance(intermediate_instance.not_started_instance())?; - - if intermediate_instance.has_start() { - // Runtime is not allowed to have the `start` function. - Err(Error::RuntimeHasStartFn) - } else { - Ok(( - intermediate_instance.assert_no_start(), - resolver.missing_functions.into_inner(), - memory, - )) - } -} - -/// A state snapshot of an instance taken just after instantiation. -/// -/// It is used for restoring the state of the module after execution. 
-#[derive(Clone)] -struct GlobalValsSnapshot { - /// The list of all global mutable variables of the module in their sequential order. - global_mut_values: Vec, -} - -impl GlobalValsSnapshot { - // Returns `None` if instance is not valid. - fn take(module_instance: &ModuleRef) -> Self { - // Collect all values of mutable globals. - let global_mut_values = module_instance - .globals() - .iter() - .filter(|g| g.is_mutable()) - .map(|g| g.get()) - .collect(); - Self { global_mut_values } - } - - /// Reset the runtime instance to the initial version by restoring - /// the preserved memory and globals. - /// - /// Returns `Err` if applying the snapshot is failed. - fn apply(&self, instance: &ModuleRef) -> Result<(), WasmError> { - for (global_ref, global_val) in instance - .globals() - .iter() - .filter(|g| g.is_mutable()) - .zip(self.global_mut_values.iter()) - { - // the instance should be the same as used for preserving and - // we iterate the same way it as we do it for preserving values that means that the - // types should be the same and all the values are mutable. So no error is expected/ - global_ref.set(*global_val).map_err(|_| WasmError::ApplySnapshotFailed)?; - } - Ok(()) - } -} - -/// A runtime along with initial copy of data segments. -pub struct WasmiRuntime { - /// A wasm module. - module: Module, - /// The host functions registered for this instance. - host_functions: Arc>, - /// Enable stub generation for functions that are not available in `host_functions`. - /// These stubs will error when the wasm blob tries to call them. - allow_missing_func_imports: bool, - - global_vals_snapshot: GlobalValsSnapshot, - data_segments_snapshot: DataSegmentsSnapshot, -} - -impl WasmModule for WasmiRuntime { - fn new_instance(&self) -> Result, Error> { - // Instantiate this module. 
- let (instance, missing_functions, memory) = - instantiate_module(&self.module, &self.host_functions, self.allow_missing_func_imports) - .map_err(|e| WasmError::Instantiation(e.to_string()))?; - - Ok(Box::new(WasmiInstance { - instance, - memory, - global_vals_snapshot: self.global_vals_snapshot.clone(), - data_segments_snapshot: self.data_segments_snapshot.clone(), - host_functions: self.host_functions.clone(), - allow_missing_func_imports: self.allow_missing_func_imports, - missing_functions: Arc::new(missing_functions), - memory_zeroed: true, - })) - } -} - -/// Create a new `WasmiRuntime` given the code. This function loads the module and -/// stores it in the instance. -pub fn create_runtime( - mut blob: RuntimeBlob, - heap_alloc_strategy: HeapAllocStrategy, - host_functions: Vec<&'static dyn Function>, - allow_missing_func_imports: bool, -) -> Result { - let data_segments_snapshot = - DataSegmentsSnapshot::take(&blob).map_err(|e| WasmError::Other(e.to_string()))?; - - // Make sure we only have exported memory to simplify the code of the wasmi executor. - blob.convert_memory_import_into_export()?; - // Ensure that the memory uses the correct heap pages. - blob.setup_memory_according_to_heap_alloc_strategy(heap_alloc_strategy)?; - - let module = - Module::from_parity_wasm_module(blob.into_inner()).map_err(|_| WasmError::InvalidModule)?; - - let global_vals_snapshot = { - let (instance, _, _) = - instantiate_module(&module, &host_functions, allow_missing_func_imports) - .map_err(|e| WasmError::Instantiation(e.to_string()))?; - GlobalValsSnapshot::take(&instance) - }; - - Ok(WasmiRuntime { - module, - data_segments_snapshot, - global_vals_snapshot, - host_functions: Arc::new(host_functions), - allow_missing_func_imports, - }) -} - -/// Wasmi instance wrapper along with the state snapshot. -pub struct WasmiInstance { - /// A wasm module instance. - instance: ModuleRef, - /// The memory instance of used by the wasm module. 
- memory: MemoryRef, - /// Is the memory zeroed? - memory_zeroed: bool, - /// The snapshot of global variable values just after instantiation. - global_vals_snapshot: GlobalValsSnapshot, - /// The snapshot of data segments. - data_segments_snapshot: DataSegmentsSnapshot, - /// The host functions registered for this instance. - host_functions: Arc>, - /// Enable stub generation for functions that are not available in `host_functions`. - /// These stubs will error when the wasm blob trie to call them. - allow_missing_func_imports: bool, - /// List of missing functions detected during function resolution - missing_functions: Arc>, -} - -// This is safe because `WasmiInstance` does not leak any references to `self.memory` and -// `self.instance` -unsafe impl Send for WasmiInstance {} - -impl WasmiInstance { - fn call_impl( - &mut self, - method: InvokeMethod, - data: &[u8], - allocation_stats: &mut Option, - ) -> Result, Error> { - // We reuse a single wasm instance for multiple calls and a previous call (if any) - // altered the state. Therefore, we need to restore the instance to original state. - - if !self.memory_zeroed { - // First, zero initialize the linear memory. - self.memory.erase().map_err(|e| { - // Snapshot restoration failed. This is pretty unexpected since this can happen - // if some invariant is broken or if the system is under extreme memory pressure - // (so erasing fails). - error!(target: "wasm-executor", "snapshot restoration failed: {}", e); - WasmError::ErasingFailed(e.to_string()) - })?; - } - - // Second, reapply data segments into the linear memory. - self.data_segments_snapshot - .apply(|offset, contents| self.memory.set(offset, contents))?; - - // Third, restore the global variables to their initial values. 
- self.global_vals_snapshot.apply(&self.instance)?; - - let res = call_in_wasm_module( - &self.instance, - &self.memory, - method, - data, - self.host_functions.clone(), - self.allow_missing_func_imports, - self.missing_functions.clone(), - allocation_stats, - ); - - // If we couldn't unmap it, erase the memory. - self.memory_zeroed = self.memory.erase().is_ok(); - - res - } -} - -impl WasmInstance for WasmiInstance { - fn call_with_allocation_stats( - &mut self, - method: InvokeMethod, - data: &[u8], - ) -> (Result, Error>, Option) { - let mut allocation_stats = None; - let result = self.call_impl(method, data, &mut allocation_stats); - (result, allocation_stats) - } - - fn get_global_const(&mut self, name: &str) -> Result, Error> { - match self.instance.export_by_name(name) { - Some(global) => Ok(Some( - global - .as_global() - .ok_or_else(|| format!("`{}` is not a global", name))? - .get() - .into(), - )), - None => Ok(None), - } - } - - fn linear_memory_base_ptr(&self) -> Option<*const u8> { - Some(self.memory.direct_access().as_ref().as_ptr()) - } -} diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 58a1ffde07898..3e669e7c9e701 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -19,7 +19,7 @@ libc = "0.2.121" # When bumping wasmtime do not forget to also bump rustix # to exactly the same version as used by wasmtime! 
-wasmtime = { version = "6.0.2", default-features = false, features = [ +wasmtime = { version = "8.0.1", default-features = false, features = [ "cache", "cranelift", "jitdump", @@ -29,8 +29,8 @@ wasmtime = { version = "6.0.2", default-features = false, features = [ anyhow = "1.0.68" sc-allocator = { version = "4.1.0-dev", path = "../../allocator" } sc-executor-common = { version = "0.10.0-dev", path = "../common" } -sp-runtime-interface = { version = "7.0.0", path = "../../../primitives/runtime-interface" } -sp-wasm-interface = { version = "7.0.0", path = "../../../primitives/wasm-interface", features = ["wasmtime"] } +sp-runtime-interface = { version = "17.0.0", path = "../../../primitives/runtime-interface" } +sp-wasm-interface = { version = "14.0.0", path = "../../../primitives/wasm-interface", features = ["wasmtime"] } # Here we include the rustix crate in the exactly same semver-compatible version as used by # wasmtime and enable its 'use-libc' flag. @@ -39,13 +39,12 @@ sp-wasm-interface = { version = "7.0.0", path = "../../../primitives/wasm-interf # this doesn't have any actual benefits for us besides making it harder to debug memory # problems (since then `mmap` etc. cannot be easily hooked into). 
rustix = { version = "0.36.7", default-features = false, features = ["std", "mm", "fs", "param", "use-libc"] } -once_cell = "1.12.0" [dev-dependencies] wat = "1.0" sc-runtime-test = { version = "2.0.0", path = "../runtime-test" } -sp-io = { version = "7.0.0", path = "../../../primitives/io" } +sp-io = { version = "23.0.0", path = "../../../primitives/io" } tempfile = "3.3.0" paste = "1.0" -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } cargo_metadata = "0.15.4" diff --git a/client/executor/wasmtime/src/lib.rs b/client/executor/wasmtime/src/lib.rs index c45478ec46a37..82e62b4a5dd3c 100644 --- a/client/executor/wasmtime/src/lib.rs +++ b/client/executor/wasmtime/src/lib.rs @@ -37,6 +37,7 @@ mod util; mod tests; pub use runtime::{ - create_runtime, create_runtime_from_artifact, prepare_runtime_artifact, Config, - DeterministicStackLimit, InstantiationStrategy, Semantics, + create_runtime, create_runtime_from_artifact, create_runtime_from_artifact_bytes, + prepare_runtime_artifact, Config, DeterministicStackLimit, InstantiationStrategy, Semantics, + WasmtimeRuntime, }; diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index c9a2c83e0493c..23b069870aa36 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -435,7 +435,7 @@ pub struct DeterministicStackLimit { /// All of the CoW strategies (with `CopyOnWrite` suffix) are only supported when either: /// a) we're running on Linux, /// b) we're running on an Unix-like system and we're precompiling -/// our module beforehand. +/// our module beforehand and instantiating from a file. /// /// If the CoW variant of a strategy is unsupported the executor will /// fall back to the non-CoW equivalent. @@ -537,7 +537,7 @@ enum CodeSupplyMode<'a> { /// The runtime is instantiated using the given runtime blob. 
Fresh(RuntimeBlob), - /// The runtime is instantiated using a precompiled module. + /// The runtime is instantiated using a precompiled module at the given path. /// /// This assumes that the code is already prepared for execution and the same `Config` was /// used. @@ -545,6 +545,12 @@ enum CodeSupplyMode<'a> { /// We use a `Path` here instead of simply passing a byte slice to allow `wasmtime` to /// map the runtime's linear memory on supported platforms in a copy-on-write fashion. Precompiled(&'a Path), + + /// The runtime is instantiated using a precompiled module with the given bytes. + /// + /// This assumes that the code is already prepared for execution and the same `Config` was + /// used. + PrecompiledBytes(&'a [u8]), } /// Create a new `WasmtimeRuntime` given the code. This function performs translation from Wasm to @@ -589,6 +595,31 @@ where do_create_runtime::(CodeSupplyMode::Precompiled(compiled_artifact_path), config) } +/// The same as [`create_runtime`] but takes the bytes of a precompiled artifact, +/// which makes this function considerably faster than [`create_runtime`], +/// but slower than the more optimized [`create_runtime_from_artifact`]. +/// This is especially slow on non-Linux Unix systems. Useful in very niche cases. +/// +/// # Safety +/// +/// The caller must ensure that the compiled artifact passed here was: +/// 1) produced by [`prepare_runtime_artifact`], +/// 2) was not modified, +/// +/// Failure to adhere to these requirements might lead to crashes and arbitrary code execution. +/// +/// It is ok though if the compiled artifact was created by code of another version or with +/// different configuration flags. In such case the caller will receive an `Err` deterministically. 
+pub unsafe fn create_runtime_from_artifact_bytes( + compiled_artifact_bytes: &[u8], + config: Config, +) -> std::result::Result +where + H: HostFunctions, +{ + do_create_runtime::(CodeSupplyMode::PrecompiledBytes(compiled_artifact_bytes), config) +} + /// # Safety /// /// This is only unsafe if called with [`CodeSupplyMode::Artifact`]. See @@ -663,6 +694,22 @@ where (module, InternalInstantiationStrategy::Builtin) }, + CodeSupplyMode::PrecompiledBytes(compiled_artifact_bytes) => { + if let InstantiationStrategy::LegacyInstanceReuse = + config.semantics.instantiation_strategy + { + return Err(WasmError::Other("the legacy instance reuse instantiation strategy is incompatible with precompiled modules".into())); + } + + // SAFETY: The unsafety of `deserialize` is covered by this function. The + // responsibilities to maintain the invariants are passed to the caller. + // + // See [`create_runtime_from_artifact_bytes`] for more details. + let module = wasmtime::Module::deserialize(&engine, compiled_artifact_bytes) + .map_err(|e| WasmError::Other(format!("cannot deserialize module: {:#}", e)))?; + + (module, InternalInstantiationStrategy::Builtin) + }, }; let mut linker = wasmtime::Linker::new(&engine); diff --git a/client/executor/wasmtime/src/tests.rs b/client/executor/wasmtime/src/tests.rs index e8d7c5ab1afac..65093687822d4 100644 --- a/client/executor/wasmtime/src/tests.rs +++ b/client/executor/wasmtime/src/tests.rs @@ -500,10 +500,7 @@ fn test_instances_without_reuse_are_not_leaked() { #[test] fn test_rustix_version_matches_with_wasmtime() { - let metadata = cargo_metadata::MetadataCommand::new() - .manifest_path("../../../Cargo.toml") - .exec() - .unwrap(); + let metadata = cargo_metadata::MetadataCommand::new().exec().unwrap(); let wasmtime_rustix = metadata .packages diff --git a/client/executor/wasmtime/src/util.rs b/client/executor/wasmtime/src/util.rs index 5c64fc01c13a8..c38d969ce9dcd 100644 --- a/client/executor/wasmtime/src/util.rs +++ 
b/client/executor/wasmtime/src/util.rs @@ -144,8 +144,8 @@ pub(crate) fn replace_strategy_if_broken(strategy: &mut InstantiationStrategy) { InstantiationStrategy::LegacyInstanceReuse => InstantiationStrategy::RecreateInstance, }; - use once_cell::sync::OnceCell; - static IS_OK: OnceCell = OnceCell::new(); + use std::sync::OnceLock; + static IS_OK: OnceLock = OnceLock::new(); let is_ok = IS_OK.get_or_init(|| { let is_ok = match is_madvise_working() { diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index cd84dcb5a0dfe..eb6e854ee3583 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -21,4 +21,4 @@ sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network = { version = "0.10.0-dev", path = "../network" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index 81e047e90ab31..b5af0bc90343a 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -14,21 +14,30 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" +array-bytes = "6.1" parking_lot = "0.12.1" serde_json = "1.0.85" thiserror = "1.0" -sp-application-crypto = { version = "7.0.0", path = "../../primitives/application-crypto" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.13.0", path = "../../primitives/keystore" } +sp-application-crypto = { version = "23.0.0", path = "../../primitives/application-crypto" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-keystore = { version = "0.27.0", path = "../../primitives/keystore" } [dev-dependencies] tempfile = "3.1.0" [features] -# This feature adds BLS crypto 
primitives. It should not be used in production since -# the BLS implementation and interface may still be subject to significant change. +# This feature adds BLS crypto primitives. +# It should not be used in production since the implementation and interface may still +# be subject to significant changes. bls-experimental = [ - "sp-core/bls-experimental", - "sp-keystore/bls-experimental", + "sp-core/bls-experimental", + "sp-keystore/bls-experimental", +] + +# This feature adds Bandersnatch crypto primitives. +# It should not be used in production since the implementation and interface may still +# be subject to significant changes. +bandersnatch-experimental = [ + "sp-core/bandersnatch-experimental", + "sp-keystore/bandersnatch-experimental", ] diff --git a/client/keystore/src/lib.rs b/client/keystore/src/lib.rs index 4151f8c4dc1a4..2d353f3ceba5d 100644 --- a/client/keystore/src/lib.rs +++ b/client/keystore/src/lib.rs @@ -26,6 +26,7 @@ use std::io; /// Local keystore implementation mod local; pub use local::LocalKeystore; +pub use sp_keystore::Keystore; /// Keystore error. #[derive(Debug, thiserror::Error)] diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index 4167e486ecf62..97bc7c71a4a58 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -19,6 +19,8 @@ use parking_lot::RwLock; use sp_application_crypto::{AppCrypto, AppPair, IsWrappedBy}; +#[cfg(feature = "bandersnatch-experimental")] +use sp_core::bandersnatch; #[cfg(feature = "bls-experimental")] use sp_core::{bls377, bls381}; use sp_core::{ @@ -234,6 +236,69 @@ impl Keystore for LocalKeystore { Ok(sig) } + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_public_keys(&self, key_type: KeyTypeId) -> Vec { + self.public_keys::(key_type) + } + + /// Generate a new pair compatible with the 'bandersnatch' signature scheme. + /// + /// If `[seed]` is `Some` then the key will be ephemeral and stored in memory. 
+ #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> std::result::Result { + self.generate_new::(key_type, seed) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + msg: &[u8], + ) -> std::result::Result, TraitError> { + self.sign::(key_type, public, msg) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_vrf_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + data: &bandersnatch::vrf::VrfSignData, + ) -> std::result::Result, TraitError> { + self.vrf_sign::(key_type, public, data) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_vrf_output( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + input: &bandersnatch::vrf::VrfInput, + ) -> std::result::Result, TraitError> { + self.vrf_output::(key_type, public, input) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_ring_vrf_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + data: &bandersnatch::vrf::VrfSignData, + prover: &bandersnatch::ring_vrf::RingProver, + ) -> std::result::Result, TraitError> { + let sig = self + .0 + .read() + .key_pair_by_type::(public, key_type)? 
+ .map(|pair| pair.ring_vrf_sign(data, prover)); + Ok(sig) + } + #[cfg(feature = "bls-experimental")] fn bls381_public_keys(&self, key_type: KeyTypeId) -> Vec { self.public_keys::(key_type) diff --git a/client/merkle-mountain-range/Cargo.toml b/client/merkle-mountain-range/Cargo.toml index 899f4cc2e08da..7270cfbda0169 100644 --- a/client/merkle-mountain-range/Cargo.toml +++ b/client/merkle-mountain-range/Cargo.toml @@ -11,7 +11,7 @@ homepage = "https://substrate.io" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3" log = "0.4" sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } @@ -19,14 +19,14 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-consensus-beefy = { version = "4.0.0-dev", path = "../../primitives/consensus/beefy" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } sp-mmr-primitives = { version = "4.0.0-dev", path = "../../primitives/merkle-mountain-range" } sc-offchain = { version = "4.0.0-dev", path = "../offchain" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } [dev-dependencies] parking_lot = "0.12.1" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } tokio = "1.17.0" diff --git a/client/merkle-mountain-range/rpc/Cargo.toml 
b/client/merkle-mountain-range/rpc/Cargo.toml index ce71158808e1e..c6f8652c7e843 100644 --- a/client/merkle-mountain-range/rpc/Cargo.toml +++ b/client/merkle-mountain-range/rpc/Cargo.toml @@ -12,14 +12,14 @@ description = "Node-specific RPC methods for interaction with Merkle Mountain Ra targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } -serde = { version = "1.0.136", features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-mmr-primitives = { version = "4.0.0-dev", path = "../../../primitives/merkle-mountain-range" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } anyhow = "1" [dev-dependencies] diff --git a/client/merkle-mountain-range/rpc/src/lib.rs b/client/merkle-mountain-range/rpc/src/lib.rs index daf2cd1ec298b..5be82b600d914 100644 --- a/client/merkle-mountain-range/rpc/src/lib.rs +++ b/client/merkle-mountain-range/rpc/src/lib.rs @@ -30,9 +30,12 @@ use jsonrpsee::{ }; use serde::{Deserialize, Serialize}; -use sp_api::{NumberFor, ProvideRuntimeApi}; +use sp_api::{ApiExt, NumberFor, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; -use sp_core::Bytes; +use sp_core::{ + offchain::{storage::OffchainDb, OffchainDbExt, OffchainStorage}, + Bytes, +}; use sp_mmr_primitives::{Error as MmrError, Proof}; use sp_runtime::traits::Block as BlockT; @@ -127,26 +130,28 @@ pub trait MmrApi { } /// MMR RPC methods. 
-pub struct Mmr { +pub struct Mmr { client: Arc, + offchain_db: OffchainDb, _marker: PhantomData, } -impl Mmr { +impl Mmr { /// Create new `Mmr` with the given reference to the client. - pub fn new(client: Arc) -> Self { - Self { client, _marker: Default::default() } + pub fn new(client: Arc, offchain_storage: S) -> Self { + Self { client, _marker: Default::default(), offchain_db: OffchainDb::new(offchain_storage) } } } #[async_trait] -impl MmrApiServer<::Hash, NumberFor, MmrHash> - for Mmr +impl MmrApiServer<::Hash, NumberFor, MmrHash> + for Mmr where Block: BlockT, Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, Client::Api: MmrRuntimeApi>, MmrHash: Codec + Send + Sync + 'static, + S: OffchainStorage + 'static, { fn mmr_root(&self, at: Option<::Hash>) -> RpcResult { let block_hash = at.unwrap_or_else(|| @@ -166,18 +171,15 @@ where best_known_block_number: Option>, at: Option<::Hash>, ) -> RpcResult::Hash>> { - let api = self.client.runtime_api(); + let mut api = self.client.runtime_api(); let block_hash = at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. self.client.info().best_hash); + api.register_extension(OffchainDbExt::new(self.offchain_db.clone())); + let (leaves, proof) = api - .generate_proof_with_context( - block_hash, - sp_core::ExecutionContext::OffchainCall(None), - block_numbers, - best_known_block_number, - ) + .generate_proof(block_hash, block_numbers, best_known_block_number) .map_err(runtime_error_into_rpc_error)? 
.map_err(mmr_error_into_rpc_error)?; @@ -185,7 +187,7 @@ where } fn verify_proof(&self, proof: LeavesProof<::Hash>) -> RpcResult { - let api = self.client.runtime_api(); + let mut api = self.client.runtime_api(); let leaves = Decode::decode(&mut &proof.leaves.0[..]) .map_err(|e| CallError::InvalidParams(anyhow::Error::new(e)))?; @@ -193,14 +195,11 @@ where let decoded_proof = Decode::decode(&mut &proof.proof.0[..]) .map_err(|e| CallError::InvalidParams(anyhow::Error::new(e)))?; - api.verify_proof_with_context( - proof.block_hash, - sp_core::ExecutionContext::OffchainCall(None), - leaves, - decoded_proof, - ) - .map_err(runtime_error_into_rpc_error)? - .map_err(mmr_error_into_rpc_error)?; + api.register_extension(OffchainDbExt::new(self.offchain_db.clone())); + + api.verify_proof(proof.block_hash, leaves, decoded_proof) + .map_err(runtime_error_into_rpc_error)? + .map_err(mmr_error_into_rpc_error)?; Ok(true) } diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 40277c946a1d7..e25a769587dab 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -19,13 +19,12 @@ futures = "0.3.21" futures-timer = "3.0.1" libp2p = "0.51.3" log = "0.4.17" -lru = "0.8.1" +schnellru = "0.2.1" tracing = "0.1.29" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-network = { version = "0.10.0-dev", path = "../network/" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } -sc-peerset = { version = "4.0.0-dev", path = "../peerset" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } [dev-dependencies] tokio = "1.22.0" diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 4793d7822ddbe..6a3790ee2b2b2 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -21,9 
+21,8 @@ use crate::{ Network, Syncing, Validator, }; -use sc_network::{event::Event, types::ProtocolName}; +use sc_network::{event::Event, types::ProtocolName, ReputationChange}; use sc_network_common::sync::SyncEvent; -use sc_peerset::ReputationChange; use futures::{ channel::mpsc::{channel, Receiver, Sender}, @@ -239,10 +238,7 @@ impl Future for GossipEngine { SyncEvent::PeerConnected(remote) => this.network.add_set_reserved(remote, this.protocol.clone()), SyncEvent::PeerDisconnected(remote) => - this.network.remove_peers_from_reserved_set( - this.protocol.clone(), - vec![remote], - ), + this.network.remove_set_reserved(remote, this.protocol.clone()), }, // The sync event stream closed. Do the same for [`GossipValidator`]. Poll::Ready(None) => { @@ -414,20 +410,14 @@ mod tests { unimplemented!(); } - fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) {} - - fn add_to_peers_set( + fn remove_peers_from_reserved_set( &self, _protocol: ProtocolName, - _peers: HashSet, + _peers: Vec, ) -> Result<(), String> { unimplemented!(); } - fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); - } - fn sync_num_connected(&self) -> usize { unimplemented!(); } diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index ef87dd599e010..5b02be5c23f69 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -89,6 +89,12 @@ pub trait Network: NetworkPeers + NetworkEventStream + NetworkNotific log::error!(target: "gossip", "add_set_reserved failed: {}", err); } } + fn remove_set_reserved(&self, who: PeerId, protocol: ProtocolName) { + let result = self.remove_peers_from_reserved_set(protocol, iter::once(who).collect()); + if let Err(err) = result { + log::error!(target: "gossip", "remove_set_reserved failed: {}", err); + } + } } impl Network for T where T: NetworkPeers + NetworkEventStream + NetworkNotification {} diff --git 
a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index e6d2b0e2ae4c8..4bfb5a7d37f49 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -20,12 +20,13 @@ use crate::{MessageIntent, Network, ValidationResult, Validator, ValidatorContex use ahash::AHashSet; use libp2p::PeerId; -use lru::LruCache; +use schnellru::{ByLength, LruMap}; + use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network::types::ProtocolName; use sc_network_common::role::ObservedRole; -use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; -use std::{collections::HashMap, iter, num::NonZeroUsize, sync::Arc, time, time::Instant}; +use sp_runtime::traits::{Block as BlockT, Hash, HashingFor}; +use std::{collections::HashMap, iter, sync::Arc, time, time::Instant}; // FIXME: Add additional spam/DoS attack protection: https://github.com/paritytech/substrate/issues/1115 // NOTE: The current value is adjusted based on largest production network deployment (Kusama) and @@ -36,14 +37,14 @@ use std::{collections::HashMap, iter, num::NonZeroUsize, sync::Arc, time, time:: // // Assuming that each known message is tracked with a 32 byte hash (common for `Block::Hash`), then // this cache should take about 256 KB of memory. -const KNOWN_MESSAGES_CACHE_SIZE: usize = 8192; +const KNOWN_MESSAGES_CACHE_SIZE: u32 = 8192; const REBROADCAST_INTERVAL: time::Duration = time::Duration::from_millis(750); pub(crate) const PERIODIC_MAINTENANCE_INTERVAL: time::Duration = time::Duration::from_millis(1100); mod rep { - use sc_peerset::ReputationChange as Rep; + use sc_network::ReputationChange as Rep; /// Reputation change when a peer sends us a gossip message that we didn't know about. pub const GOSSIP_SUCCESS: Rep = Rep::new(1 << 4, "Successful gossip"); /// Reputation change when a peer sends us a gossip message that we already knew about. 
@@ -155,7 +156,7 @@ where pub struct ConsensusGossip { peers: HashMap>, messages: Vec>, - known_messages: LruCache, + known_messages: LruMap, protocol: ProtocolName, validator: Arc>, next_broadcast: Instant, @@ -181,11 +182,7 @@ impl ConsensusGossip { ConsensusGossip { peers: HashMap::new(), messages: Default::default(), - known_messages: { - let cap = NonZeroUsize::new(KNOWN_MESSAGES_CACHE_SIZE) - .expect("cache capacity is not zero"); - LruCache::new(cap) - }, + known_messages: { LruMap::new(ByLength::new(KNOWN_MESSAGES_CACHE_SIZE)) }, protocol, validator, next_broadcast: Instant::now() + REBROADCAST_INTERVAL, @@ -216,7 +213,7 @@ impl ConsensusGossip { message: Vec, sender: Option, ) { - if self.known_messages.put(message_hash, ()).is_none() { + if self.known_messages.insert(message_hash, ()) { self.messages.push(MessageEntry { message_hash, topic, message, sender }); if let Some(ref metrics) = self.metrics { @@ -231,7 +228,7 @@ impl ConsensusGossip { /// message is already expired it should be dropped on the next garbage /// collection. 
pub fn register_message(&mut self, topic: B::Hash, message: Vec) { - let message_hash = HashFor::::hash(&message[..]); + let message_hash = HashingFor::::hash(&message[..]); self.register_message_hashed(message_hash, topic, message, None); } @@ -313,7 +310,7 @@ impl ConsensusGossip { ); for (_, ref mut peer) in self.peers.iter_mut() { - peer.known_messages.retain(|h| known_messages.contains(h)); + peer.known_messages.retain(|h| known_messages.get(h).is_some()); } } @@ -346,9 +343,9 @@ impl ConsensusGossip { } for message in messages { - let message_hash = HashFor::::hash(&message[..]); + let message_hash = HashingFor::::hash(&message[..]); - if self.known_messages.contains(&message_hash) { + if self.known_messages.get(&message_hash).is_some() { tracing::trace!( target: "gossip", %who, @@ -459,7 +456,7 @@ impl ConsensusGossip { message: Vec, force: bool, ) { - let message_hash = HashFor::::hash(&message); + let message_hash = HashingFor::::hash(&message); self.register_message_hashed(message_hash, topic, message.clone(), None); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; propagate( @@ -480,7 +477,7 @@ impl ConsensusGossip { Some(peer) => peer, }; - let message_hash = HashFor::::hash(&message); + let message_hash = HashingFor::::hash(&message); tracing::trace!( target: "gossip", @@ -529,9 +526,8 @@ mod tests { use sc_network::{ config::MultiaddrWithPeerId, event::Event, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, NotificationSenderError, - NotificationSenderT as NotificationSender, + NotificationSenderT as NotificationSender, ReputationChange, }; - use sc_peerset::ReputationChange; use sp_runtime::{ testing::{Block as RawBlock, ExtrinsicWrapper, H256}, traits::NumberFor, @@ -546,7 +542,7 @@ mod tests { macro_rules! 
push_msg { ($consensus:expr, $topic:expr, $hash: expr, $m:expr) => { - if $consensus.known_messages.put($hash, ()).is_none() { + if $consensus.known_messages.insert($hash, ()) { $consensus.messages.push(MessageEntry { message_hash: $hash, topic: $topic, @@ -644,20 +640,14 @@ mod tests { unimplemented!(); } - fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) {} - - fn add_to_peers_set( + fn remove_peers_from_reserved_set( &self, _protocol: ProtocolName, - _peers: HashSet, + _peers: Vec, ) -> Result<(), String> { unimplemented!(); } - fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); - } - fn sync_num_connected(&self) -> usize { unimplemented!(); } @@ -733,8 +723,8 @@ mod tests { push_msg!(consensus, prev_hash, m1_hash, m1); push_msg!(consensus, best_hash, m2_hash, m2); - consensus.known_messages.put(m1_hash, ()); - consensus.known_messages.put(m2_hash, ()); + consensus.known_messages.insert(m1_hash, ()); + consensus.known_messages.insert(m2_hash, ()); consensus.collect_garbage(); assert_eq!(consensus.messages.len(), 2); @@ -747,7 +737,7 @@ mod tests { assert_eq!(consensus.messages.len(), 1); // known messages are only pruned based on size. assert_eq!(consensus.known_messages.len(), 2); - assert!(consensus.known_messages.contains(&m2_hash)); + assert!(consensus.known_messages.get(&m2_hash).is_some()); } #[test] @@ -756,7 +746,7 @@ mod tests { // Register message. 
let message = vec![4, 5, 6]; - let topic = HashFor::::hash(&[1, 2, 3]); + let topic = HashingFor::::hash(&[1, 2, 3]); consensus.register_message(topic, message.clone()); assert_eq!( diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index de4c4c14a2587..887368a02bcca 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -14,12 +14,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" +array-bytes = "6.1" async-channel = "1.8.0" async-trait = "0.1" asynchronous-codec = "0.6" bytes = "1" -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } either = "1.5.3" fnv = "1.0.6" futures = "0.3.21" @@ -28,36 +28,30 @@ ip_network = "0.4.1" libp2p = { version = "0.51.3", features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "tcp", "tokio", "yamux", "websocket", "request-response"] } linked_hash_set = "0.1.3" log = "0.4.17" -lru = "0.8.1" mockall = "0.11.3" parking_lot = "0.12.1" +partial_sort = "0.2.0" pin-project = "1.0.12" rand = "0.8.5" -serde = { version = "1.0.136", features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } serde_json = "1.0.85" -smallvec = "1.8.0" +smallvec = "1.11.0" thiserror = "1.0" unsigned-varint = { version = "0.7.1", features = ["futures", "asynchronous_codec"] } zeroize = "1.4.3" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } -sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-client-api = { version = "4.0.0-dev", path = "../api" } -sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-network-common = { version = "0.10.0-dev", path = "./common" } -sc-peerset = { version = "4.0.0-dev", path = "../peerset" } sc-utils = { version = "4.0.0-dev", path = "../utils" } -sp-arithmetic = { version 
= "6.0.0", path = "../../primitives/arithmetic" } +sp-arithmetic = { version = "16.0.0", path = "../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } -# Force 0.9.2 as snow release to fix the compilation. -# -# When libp2p also enforces this version, we can get rid off this extra dep here. -snow = "0.9.2" +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } +wasm-timer = "0.2" [dev-dependencies] assert_matches = "1.3" +mockall = "0.11.3" multistream-select = "0.12.1" rand = "0.8.5" tempfile = "3.1.0" @@ -67,7 +61,7 @@ tokio-test = "0.4.2" sc-network-light = { version = "0.10.0-dev", path = "./light" } sc-network-sync = { version = "0.10.0-dev", path = "./sync" } sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/network/bitswap/Cargo.toml b/client/network/bitswap/Cargo.toml index a953676ec160e..bdc9e18f7457d 100644 --- a/client/network/bitswap/Cargo.toml +++ b/client/network/bitswap/Cargo.toml @@ -16,7 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.11" [dependencies] -cid = "0.8.6" +async-channel = "1.8.0" +cid = "0.9.0" futures = "0.3.21" libp2p-identity = { version = "0.1.2", features = ["peerid"] } log = "0.4.17" @@ -25,15 +26,14 @@ thiserror = "1.0" unsigned-varint = { version = "0.7.1", features = ["futures", 
"asynchronous_codec"] } sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-network = { version = "0.10.0-dev", path = "../" } -sc-network-common = { version = "0.10.0-dev", path = "../common" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } [dev-dependencies] tokio = { version = "1.22.0", features = ["full"] } sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/network/bitswap/src/lib.rs b/client/network/bitswap/src/lib.rs index a7857f6eec362..beaaa8fd0fdec 100644 --- a/client/network/bitswap/src/lib.rs +++ b/client/network/bitswap/src/lib.rs @@ -21,7 +21,7 @@ //! CID is expected to reference 256-bit Blake2b transaction hash. use cid::{self, Version}; -use futures::{channel::mpsc, StreamExt}; +use futures::StreamExt; use libp2p_identity::PeerId; use log::{debug, error, trace}; use prost::Message; @@ -93,13 +93,13 @@ impl Prefix { /// Bitswap request handler pub struct BitswapRequestHandler { client: Arc + Send + Sync>, - request_receiver: mpsc::Receiver, + request_receiver: async_channel::Receiver, } impl BitswapRequestHandler { /// Create a new [`BitswapRequestHandler`]. 
pub fn new(client: Arc + Send + Sync>) -> (Self, ProtocolConfig) { - let (tx, request_receiver) = mpsc::channel(MAX_REQUEST_QUEUE); + let (tx, request_receiver) = async_channel::bounded(MAX_REQUEST_QUEUE); let config = ProtocolConfig { name: ProtocolName::from(PROTOCOL_NAME), @@ -289,7 +289,7 @@ pub enum BitswapError { #[cfg(test)] mod tests { use super::*; - use futures::{channel::oneshot, SinkExt}; + use futures::channel::oneshot; use sc_block_builder::BlockBuilderProvider; use schema::bitswap::{ message::{wantlist::Entry, Wantlist}, diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index d9769413b857f..c2fee608d79b6 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -16,28 +16,17 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.11" [dependencies] -array-bytes = "4.1" async-trait = "0.1.57" bitflags = "1.3.2" -bytes = "1" -codec = { package = "parity-scale-codec", version = "3.2.2", features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } futures = "0.3.21" -futures-timer = "3.0.2" libp2p-identity = { version = "0.1.2", features = ["peerid"] } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } -smallvec = "1.8.0" sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } -sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } -sc-utils = { version = "4.0.0-dev", path = "../../utils" } -serde = { version = "1.0.136", features = ["derive"] } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-consensus-grandpa = { version = "4.0.0-dev", path = "../../../primitives/consensus/grandpa" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } -sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -thiserror = "1.0" -zeroize = "1.4.3" +sp-runtime = { version = 
"24.0.0", path = "../../../primitives/runtime" } [dev-dependencies] tempfile = "3.1.0" diff --git a/client/network/common/src/lib.rs b/client/network/common/src/lib.rs index f53590efd4c84..8149f5ea708cd 100644 --- a/client/network/common/src/lib.rs +++ b/client/network/common/src/lib.rs @@ -21,6 +21,7 @@ pub mod message; pub mod role; pub mod sync; +pub mod types; /// Minimum Requirements for a Hash within Networking pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index 404a1aff91153..b142925aeb10c 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -22,7 +22,7 @@ pub mod message; pub mod metrics; pub mod warp; -use crate::role::Roles; +use crate::{role::Roles, types::ReputationChange}; use futures::Stream; use libp2p_identity::PeerId; @@ -106,7 +106,7 @@ pub struct SyncStatus { /// A peer did not behave as expected and should be reported. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct BadPeer(pub PeerId, pub sc_peerset::ReputationChange); +pub struct BadPeer(pub PeerId, pub ReputationChange); impl fmt::Display for BadPeer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -185,32 +185,43 @@ pub enum PollBlockAnnounceValidation { /// The announcement. announce: BlockAnnounce, }, - /// The announcement header should be imported. - ImportHeader { - /// Who sent the processed block announcement? - who: PeerId, - /// Was this their new best block? - is_best: bool, - /// The announcement. - announce: BlockAnnounce, - }, /// The block announcement should be skipped. Skip, } -/// Operation mode. -#[derive(Debug, PartialEq, Eq)] +/// Sync operation mode. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum SyncMode { - // Sync headers only - Light, - // Sync headers and block bodies + /// Full block download and verification. 
Full, - // Sync headers and the last finalied state - LightState { storage_chain_mode: bool, skip_proofs: bool }, - // Warp sync mode. + /// Download blocks and the latest state. + LightState { + /// Skip state proof download and verification. + skip_proofs: bool, + /// Download indexed transactions for recent blocks. + storage_chain_mode: bool, + }, + /// Warp sync - verify authority set transitions and the latest state. Warp, } +impl SyncMode { + /// Returns `true` if `self` is [`Self::Warp`]. + pub fn is_warp(&self) -> bool { + matches!(self, Self::Warp) + } + + /// Returns `true` if `self` is [`Self::LightState`]. + pub fn light_state(&self) -> bool { + matches!(self, Self::LightState { .. }) + } +} + +impl Default for SyncMode { + fn default() -> Self { + Self::Full + } +} #[derive(Debug)] pub struct Metrics { pub queued_blocks: u32, @@ -376,12 +387,6 @@ pub trait ChainSync: Send { response: BlockResponse, ) -> Result, BadPeer>; - /// Procss received block data. - fn process_block_response_data( - &mut self, - blocks_to_import: Result, BadPeer>, - ); - /// Handle a response from the remote to a justification request that we made. /// /// `request` must be the original request that triggered `response`. @@ -421,9 +426,6 @@ pub trait ChainSync: Send { /// [`ChainSync::push_block_announce_validation`]. /// /// This should be polled until it returns [`Poll::Pending`]. - /// - /// If [`PollBlockAnnounceValidation::ImportHeader`] is returned, then the caller MUST try to - /// import passed header (call `on_block_data`). The network request isn't sent in this case. 
fn poll_block_announce_validation( &mut self, cx: &mut std::task::Context<'_>, diff --git a/client/network/common/src/sync/warp.rs b/client/network/common/src/sync/warp.rs index aef257af4d057..37a6e62c53b4e 100644 --- a/client/network/common/src/sync/warp.rs +++ b/client/network/common/src/sync/warp.rs @@ -72,7 +72,7 @@ pub trait WarpSyncProvider: Send + Sync { #[derive(Clone, Eq, PartialEq, Debug)] pub enum WarpSyncPhase { /// Waiting for peers to connect. - AwaitingPeers, + AwaitingPeers { required_peers: usize }, /// Waiting for target block to be received. AwaitingTargetBlock, /// Downloading and verifying grandpa warp proofs. @@ -90,7 +90,8 @@ pub enum WarpSyncPhase { impl fmt::Display for WarpSyncPhase { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Self::AwaitingPeers => write!(f, "Waiting for peers"), + Self::AwaitingPeers { required_peers } => + write!(f, "Waiting for {required_peers} peers to be connected"), Self::AwaitingTargetBlock => write!(f, "Waiting for target block to be received"), Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"), Self::DownloadingTargetBlock => write!(f, "Downloading target block"), diff --git a/client/network/common/src/types.rs b/client/network/common/src/types.rs new file mode 100644 index 0000000000000..d23a2236d556a --- /dev/null +++ b/client/network/common/src/types.rs @@ -0,0 +1,38 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +/// Description of a reputation adjustment for a node. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct ReputationChange { + /// Reputation delta. + pub value: i32, + /// Reason for reputation change. + pub reason: &'static str, +} + +impl ReputationChange { + /// New reputation change with given delta and reason. + pub const fn new(value: i32, reason: &'static str) -> ReputationChange { + Self { value, reason } + } + + /// New reputation change that forces minimum possible reputation. + pub const fn new_fatal(reason: &'static str) -> ReputationChange { + Self { value: i32::MIN, reason } + } +} diff --git a/client/network/light/Cargo.toml b/client/network/light/Cargo.toml index cd0dfbca50d2a..8bdb640d5558b 100644 --- a/client/network/light/Cargo.toml +++ b/client/network/light/Cargo.toml @@ -16,8 +16,9 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.11" [dependencies] -array-bytes = "4.1" -codec = { package = "parity-scale-codec", version = "3.2.2", features = [ +async-channel = "1.8.0" +array-bytes = "6.1" +codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } futures = "0.3.21" @@ -27,8 +28,6 @@ prost = "0.11" sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-network = { version = "0.10.0-dev", path = "../" } -sc-network-common = { version = "0.10.0-dev", path = "../common" } -sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "7.0.0", path = 
"../../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } thiserror = "1.0" diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs index 2a68ebe9c2b23..8f2bcc7384b33 100644 --- a/client/network/light/src/light_client_requests/handler.rs +++ b/client/network/light/src/light_client_requests/handler.rs @@ -24,7 +24,7 @@ use crate::schema; use codec::{self, Decode, Encode}; -use futures::{channel::mpsc, prelude::*}; +use futures::prelude::*; use libp2p_identity::PeerId; use log::{debug, trace}; use prost::Message; @@ -32,8 +32,8 @@ use sc_client_api::{BlockBackend, ProofProvider}; use sc_network::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, + ReputationChange, }; -use sc_peerset::ReputationChange; use sp_core::{ hexdisplay::HexDisplay, storage::{ChildInfo, ChildType, PrefixedStorageKey}, @@ -43,9 +43,13 @@ use std::{marker::PhantomData, sync::Arc}; const LOG_TARGET: &str = "light-client-request-handler"; +/// Incoming requests bounded queue size. For now due to lack of data on light client request +/// handling in production systems, this value is chosen to match the block request limit. +const MAX_LIGHT_REQUEST_QUEUE: usize = 20; + /// Handler for incoming light client requests from a remote peer. pub struct LightClientRequestHandler { - request_receiver: mpsc::Receiver, + request_receiver: async_channel::Receiver, /// Blockchain client. client: Arc, _block: PhantomData, @@ -62,9 +66,7 @@ where fork_id: Option<&str>, client: Arc, ) -> (Self, ProtocolConfig) { - // For now due to lack of data on light client request handling in production systems, this - // value is chosen to match the block request limit. 
- let (tx, request_receiver) = mpsc::channel(20); + let (tx, request_receiver) = async_channel::bounded(MAX_LIGHT_REQUEST_QUEUE); let mut protocol_config = super::generate_protocol_config( protocol_id, diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index ef967eee92686..0aa724818e02a 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -20,9 +20,11 @@ use crate::{ discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, event::DhtEvent, peer_info, + peer_store::PeerStoreHandle, protocol::{CustomMessageOutcome, NotificationsSink, Protocol}, request_responses::{self, IfDisconnected, ProtocolConfig, RequestFailure}, types::ProtocolName, + ReputationChange, }; use bytes::Bytes; @@ -32,10 +34,10 @@ use libp2p::{ swarm::NetworkBehaviour, PeerId, }; +use parking_lot::Mutex; use sc_network_common::role::{ObservedRole, Roles}; -use sc_peerset::{PeersetHandle, ReputationChange}; use sp_runtime::traits::Block as BlockT; -use std::{collections::HashSet, time::Duration}; +use std::{collections::HashSet, sync::Arc, time::Duration}; pub use crate::request_responses::{InboundFailure, OutboundFailure, RequestId, ResponseFailure}; @@ -169,15 +171,20 @@ impl Behaviour { local_public_key: PublicKey, disco_config: DiscoveryConfig, request_response_protocols: Vec, - peerset: PeersetHandle, + peer_store_handle: PeerStoreHandle, + external_addresses: Arc>>, ) -> Result { Ok(Self { substrate, - peer_info: peer_info::PeerInfoBehaviour::new(user_agent, local_public_key), + peer_info: peer_info::PeerInfoBehaviour::new( + user_agent, + local_public_key, + external_addresses, + ), discovery: disco_config.finish(), request_responses: request_responses::RequestResponsesBehaviour::new( request_response_protocols.into_iter(), - peerset, + Box::new(peer_store_handle), )?, }) } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 17ca8335653de..d069c3f458ff5 100644 --- a/client/network/src/config.rs +++ 
b/client/network/src/config.rs @@ -22,6 +22,7 @@ //! See the documentation of [`Params`]. pub use crate::{ + discovery::DEFAULT_KADEMLIA_REPLICATION_FACTOR, protocol::NotificationsSink, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, @@ -31,13 +32,14 @@ pub use crate::{ pub use libp2p::{identity::Keypair, multiaddr, Multiaddr, PeerId}; +use crate::peer_store::PeerStoreHandle; use codec::Encode; use prometheus_endpoint::Registry; use zeroize::Zeroize; pub use sc_network_common::{ role::{Role, Roles}, - sync::warp::WarpSyncProvider, + sync::{warp::WarpSyncProvider, SyncMode}, ExHashT, }; use sc_utils::mpsc::TracingUnboundedSender; @@ -50,6 +52,7 @@ use std::{ io::{self, Write}, iter, net::Ipv4Addr, + num::NonZeroUsize, path::{Path, PathBuf}, pin::Pin, str::{self, FromStr}, @@ -273,39 +276,10 @@ impl NonReservedPeerMode { _ => None, } } -} - -/// Sync operation mode. -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum SyncMode { - /// Full block download and verification. - Full, - /// Download blocks and the latest state. - Fast { - /// Skip state proof download and verification. - skip_proofs: bool, - /// Download indexed transactions for recent blocks. - storage_chain_mode: bool, - }, - /// Warp sync - verify authority set transitions and the latest state. - Warp, -} -impl SyncMode { - /// Returns if `self` is [`Self::Warp`]. - pub fn is_warp(&self) -> bool { - matches!(self, Self::Warp) - } - - /// Returns if `self` is [`Self::Fast`]. - pub fn is_fast(&self) -> bool { - matches!(self, Self::Fast { .. }) - } -} - -impl Default for SyncMode { - fn default() -> Self { - Self::Full + /// If we are in "reserved-only" peer mode. + pub fn is_reserved_only(&self) -> bool { + matches!(self, NonReservedPeerMode::Deny) } } @@ -605,6 +579,12 @@ pub struct NetworkConfiguration { /// the presence of potentially adversarial nodes. 
pub kademlia_disjoint_query_paths: bool, + /// Kademlia replication factor determines to how many closest peers a record is replicated to. + /// + /// Discovery mechanism requires successful replication to all + /// `kademlia_replication_factor` peers to consider record successfully put. + pub kademlia_replication_factor: NonZeroUsize, + /// Enable serving block data over IPFS bitswap. pub ipfs_server: bool, @@ -656,6 +636,8 @@ impl NetworkConfiguration { enable_dht_random_walk: true, allow_non_globals_in_dht: false, kademlia_disjoint_query_paths: false, + kademlia_replication_factor: NonZeroUsize::new(DEFAULT_KADEMLIA_REPLICATION_FACTOR) + .expect("value is a constant; constant is non-zero; qed."), yamux_window_size: None, ipfs_server: false, } @@ -703,6 +685,9 @@ pub struct Params { /// Network layer configuration. pub network_config: FullNetworkConfiguration, + /// Peer store with known nodes, peer reputations, etc. + pub peer_store: PeerStoreHandle, + /// Legacy name of the protocol to use on the wire. Should be different for each chain. pub protocol_id: ProtocolId, diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 708406bd15340..77c26266aac46 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -87,6 +87,10 @@ use std::{ /// a given address. const MAX_KNOWN_EXTERNAL_ADDRESSES: usize = 32; +/// Default value for Kademlia replication factor which determines to how many closest peers a +/// record is replicated to. +pub const DEFAULT_KADEMLIA_REPLICATION_FACTOR: usize = 20; + /// `DiscoveryBehaviour` configuration. 
/// /// Note: In order to discover nodes or load and store values via Kademlia one has to add @@ -101,6 +105,7 @@ pub struct DiscoveryConfig { enable_mdns: bool, kademlia_disjoint_query_paths: bool, kademlia_protocols: Vec>, + kademlia_replication_factor: NonZeroUsize, } impl DiscoveryConfig { @@ -116,6 +121,8 @@ impl DiscoveryConfig { enable_mdns: false, kademlia_disjoint_query_paths: false, kademlia_protocols: Vec::new(), + kademlia_replication_factor: NonZeroUsize::new(DEFAULT_KADEMLIA_REPLICATION_FACTOR) + .expect("value is a constant; constant is non-zero; qed."), } } @@ -182,6 +189,12 @@ impl DiscoveryConfig { self } + /// Sets Kademlia replication factor. + pub fn with_kademlia_replication_factor(&mut self, value: NonZeroUsize) -> &mut Self { + self.kademlia_replication_factor = value; + self + } + /// Create a `DiscoveryBehaviour` from this config. pub fn finish(self) -> DiscoveryBehaviour { let Self { @@ -194,10 +207,13 @@ impl DiscoveryConfig { enable_mdns, kademlia_disjoint_query_paths, kademlia_protocols, + kademlia_replication_factor, } = self; let kademlia = if !kademlia_protocols.is_empty() { let mut config = KademliaConfig::default(); + + config.set_replication_factor(kademlia_replication_factor); config.set_protocol_names(kademlia_protocols.into_iter().map(Into::into).collect()); // By default Kademlia attempts to insert all peers into its routing table once a // dialing attempt succeeds. 
In order to control which peer is added, disable the @@ -234,14 +250,14 @@ impl DiscoveryConfig { discovery_only_if_under_num, mdns: if enable_mdns { match TokioMdns::new(mdns::Config::default(), local_peer_id) { - Ok(mdns) => Some(mdns), + Ok(mdns) => Toggle::from(Some(mdns)), Err(err) => { warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); - None + Toggle::from(None) }, } } else { - None + Toggle::from(None) }, allow_non_globals_in_dht, known_external_addresses: LruHashSet::new( @@ -265,7 +281,7 @@ pub struct DiscoveryBehaviour { /// it's always enabled in `NetworkWorker::new()`. kademlia: Toggle>, /// Discovers nodes on the local network. - mdns: Option, + mdns: Toggle, /// Stream that fires when we need to perform the next random Kademlia query. `None` if /// random walking is disabled. next_kad_random_query: Option, @@ -532,7 +548,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { addresses: &[Multiaddr], effective_role: Endpoint, ) -> Result, ConnectionDenied> { - let Some(peer_id) = maybe_peer else { return Ok(Vec::new()); }; + let Some(peer_id) = maybe_peer else { return Ok(Vec::new()) }; let mut list = self .permanent_addresses @@ -552,14 +568,12 @@ impl NetworkBehaviour for DiscoveryBehaviour { effective_role, )?; - if let Some(ref mut mdns) = self.mdns { - list_to_filter.extend(mdns.handle_pending_outbound_connection( - connection_id, - maybe_peer, - addresses, - effective_role, - )?); - } + list_to_filter.extend(self.mdns.handle_pending_outbound_connection( + connection_id, + maybe_peer, + addresses, + effective_role, + )?); if !self.allow_private_ip { list_to_filter.retain(|addr| match addr.iter().next() { @@ -647,6 +661,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { }, FromSwarm::NewListenAddr(e) => { self.kademlia.on_swarm_event(FromSwarm::NewListenAddr(e)); + self.mdns.on_swarm_event(FromSwarm::NewListenAddr(e)); }, } } @@ -883,34 +898,32 @@ impl NetworkBehaviour for DiscoveryBehaviour { } // Poll mDNS. 
- if let Some(ref mut mdns) = self.mdns { - while let Poll::Ready(ev) = mdns.poll(cx, params) { - match ev { - ToSwarm::GenerateEvent(event) => match event { - mdns::Event::Discovered(list) => { - if self.num_connections >= self.discovery_only_if_under_num { - continue - } + while let Poll::Ready(ev) = self.mdns.poll(cx, params) { + match ev { + ToSwarm::GenerateEvent(event) => match event { + mdns::Event::Discovered(list) => { + if self.num_connections >= self.discovery_only_if_under_num { + continue + } - self.pending_events - .extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); - if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(ToSwarm::GenerateEvent(ev)) - } - }, - mdns::Event::Expired(_) => {}, - }, - ToSwarm::Dial { .. } => { - unreachable!("mDNS never dials!"); + self.pending_events + .extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); + if let Some(ev) = self.pending_events.pop_front() { + return Poll::Ready(ToSwarm::GenerateEvent(ev)) + } }, - ToSwarm::NotifyHandler { event, .. } => match event {}, /* `event` is an */ - // enum with no - // variant - ToSwarm::ReportObservedAddr { address, score } => - return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), - ToSwarm::CloseConnection { peer_id, connection } => - return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), - } + mdns::Event::Expired(_) => {}, + }, + ToSwarm::Dial { .. } => { + unreachable!("mDNS never dials!"); + }, + ToSwarm::NotifyHandler { event, .. 
} => match event {}, /* `event` is an */ + // enum with no + // variant + ToSwarm::ReportObservedAddr { address, score } => + return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), + ToSwarm::CloseConnection { peer_id, connection } => + return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), } } @@ -1003,6 +1016,7 @@ mod tests { TokioExecutor(runtime), ) .build(); + let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); diff --git a/client/network/src/event.rs b/client/network/src/event.rs index 9c1034ea3dc61..2913f0b55225f 100644 --- a/client/network/src/event.rs +++ b/client/network/src/event.rs @@ -106,6 +106,8 @@ pub enum SyncEvent { received_handshake: BlockAnnouncesHandshake, /// Notification sink. sink: NotificationsSink, + /// Is the connection inbound. + inbound: bool, /// Channel for reporting accept/reject of the substream. tx: oneshot::Sender, }, diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index a66c187cacf7b..ee30759687841 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -246,12 +246,17 @@ mod behaviour; mod protocol; mod service; +#[cfg(test)] +mod mock; + pub mod config; pub mod discovery; pub mod error; pub mod event; pub mod network_state; pub mod peer_info; +pub mod peer_store; +pub mod protocol_controller; pub mod request_responses; pub mod transport; pub mod types; @@ -267,6 +272,7 @@ pub use sc_network_common::{ warp::{WarpSyncPhase, WarpSyncProgress}, ExtendedPeerInfo, StateDownloadProgress, SyncEventStream, SyncState, SyncStatusProvider, }, + types::ReputationChange, }; pub use service::{ signature::Signature, @@ -281,8 +287,6 @@ pub use service::{ }; pub use types::ProtocolName; -pub use sc_peerset::ReputationChange; - /// The maximum allowed number of established connections per peer. 
/// /// Typically, and by design of the network behaviours in this crate, diff --git a/client/network/src/mock.rs b/client/network/src/mock.rs new file mode 100644 index 0000000000000..bc596b0fa579e --- /dev/null +++ b/client/network/src/mock.rs @@ -0,0 +1,55 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Mocked components for tests. + +use crate::{peer_store::PeerStoreProvider, protocol_controller::ProtocolHandle, ReputationChange}; +use libp2p::PeerId; +use std::collections::HashSet; + +/// No-op `PeerStore`. +#[derive(Debug)] +pub struct MockPeerStore {} + +impl PeerStoreProvider for MockPeerStore { + fn is_banned(&self, _peer_id: &PeerId) -> bool { + // Make sure that the peer is not banned. + false + } + + fn register_protocol(&self, _protocol_handle: ProtocolHandle) { + // Make sure not to fail. + } + + fn report_disconnect(&mut self, _peer_id: PeerId) { + // Make sure not to fail. + } + + fn report_peer(&mut self, _peer_id: PeerId, _change: ReputationChange) { + // Make sure not to fail. + } + + fn peer_reputation(&self, _peer_id: &PeerId) -> i32 { + // Make sure that the peer is not banned. 
+ 0 + } + + fn outgoing_candidates(&self, _count: usize, _ignored: HashSet<&PeerId>) -> Vec { + unimplemented!() + } +} diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index aab3fc9487e8c..2735bd873db91 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -43,11 +43,13 @@ use libp2p::{ Multiaddr, PeerId, }; use log::{debug, error, trace}; +use parking_lot::Mutex; use smallvec::SmallVec; use std::{ - collections::hash_map::Entry, + collections::{hash_map::Entry, HashSet}, pin::Pin, + sync::Arc, task::{Context, Poll}, time::{Duration, Instant}, }; @@ -67,6 +69,8 @@ pub struct PeerInfoBehaviour { nodes_info: FnvHashMap, /// Interval at which we perform garbage collection in `nodes_info`. garbage_collect: Pin + Send>>, + /// Record keeping of external addresses. Data is queried by the `NetworkService`. + external_addresses: ExternalAddresses, } /// Information about a node we're connected to. @@ -91,9 +95,31 @@ impl NodeInfo { } } +/// Utility struct for tracking external addresses. The data is shared with the `NetworkService`. +#[derive(Debug, Clone, Default)] +pub struct ExternalAddresses { + addresses: Arc>>, +} + +impl ExternalAddresses { + /// Add an external address. + pub fn add(&mut self, addr: Multiaddr) { + self.addresses.lock().insert(addr); + } + + /// Remove an external address. + pub fn remove(&mut self, addr: &Multiaddr) { + self.addresses.lock().remove(addr); + } +} + impl PeerInfoBehaviour { /// Builds a new `PeerInfoBehaviour`. 
- pub fn new(user_agent: String, local_public_key: PublicKey) -> Self { + pub fn new( + user_agent: String, + local_public_key: PublicKey, + external_addresses: Arc>>, + ) -> Self { let identify = { let cfg = IdentifyConfig::new("/substrate/1.0".to_string(), local_public_key) .with_agent_version(user_agent) @@ -107,6 +133,7 @@ impl PeerInfoBehaviour { identify, nodes_info: FnvHashMap::default(), garbage_collect: Box::pin(interval(GARBAGE_COLLECT_INTERVAL)), + external_addresses: ExternalAddresses { addresses: external_addresses }, } } @@ -362,10 +389,12 @@ impl NetworkBehaviour for PeerInfoBehaviour { FromSwarm::ExpiredListenAddr(e) => { self.ping.on_swarm_event(FromSwarm::ExpiredListenAddr(e)); self.identify.on_swarm_event(FromSwarm::ExpiredListenAddr(e)); + self.external_addresses.remove(e.addr); }, FromSwarm::NewExternalAddr(e) => { self.ping.on_swarm_event(FromSwarm::NewExternalAddr(e)); self.identify.on_swarm_event(FromSwarm::NewExternalAddr(e)); + self.external_addresses.add(e.addr.clone()); }, FromSwarm::AddressChange(e @ AddressChange { peer_id, old, new, .. }) => { self.ping.on_swarm_event(FromSwarm::AddressChange(e)); diff --git a/client/network/src/peer_store.rs b/client/network/src/peer_store.rs new file mode 100644 index 0000000000000..2f3d4a1fd1a0b --- /dev/null +++ b/client/network/src/peer_store.rs @@ -0,0 +1,413 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! [`PeerStore`] manages peer reputations and provides connection candidates to +//! [`crate::protocol_controller::ProtocolController`]. + +use libp2p::PeerId; +use log::trace; +use parking_lot::Mutex; +use partial_sort::PartialSort; +use sc_network_common::types::ReputationChange; +use std::{ + cmp::{Ord, Ordering, PartialOrd}, + collections::{hash_map::Entry, HashMap, HashSet}, + fmt::Debug, + sync::Arc, + time::{Duration, Instant}, +}; +use wasm_timer::Delay; + +use crate::protocol_controller::ProtocolHandle; + +/// Log target for this file. +pub const LOG_TARGET: &str = "peerset"; + +/// We don't accept nodes whose reputation is under this value. +pub const BANNED_THRESHOLD: i32 = 82 * (i32::MIN / 100); +/// Reputation change for a node when we get disconnected from it. +const DISCONNECT_REPUTATION_CHANGE: i32 = -256; +/// Relative decrement of a reputation value that is applied every second. I.e., for inverse +/// decrement of 50 we decrease absolute value of the reputation by 1/50. This corresponds to a +/// factor of `k = 0.98`. It takes ~ `ln(0.5) / ln(k)` seconds to reduce the reputation by half, +/// or 34.3 seconds for the values above. In this setup the maximum allowed absolute value of +/// `i32::MAX` becomes 0 in ~1100 seconds (actually less due to integer arithmetic). +const INVERSE_DECREMENT: i32 = 50; +/// Amount of time between the moment we last updated the [`PeerStore`] entry and the moment we +/// remove it, once the reputation value reaches 0. +const FORGET_AFTER: Duration = Duration::from_secs(3600); + +/// Trait providing peer reputation management and connection candidates. +pub trait PeerStoreProvider: Debug + Send { + /// Check whether the peer is banned. 
+ fn is_banned(&self, peer_id: &PeerId) -> bool; + + /// Register a protocol handle to disconnect peers whose reputation drops below the threshold. + fn register_protocol(&self, protocol_handle: ProtocolHandle); + + /// Report peer disconnection for reputation adjustment. + fn report_disconnect(&mut self, peer_id: PeerId); + + /// Adjust peer reputation. + fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange); + + /// Get peer reputation. + fn peer_reputation(&self, peer_id: &PeerId) -> i32; + + /// Get candidates with highest reputations for initiating outgoing connections. + fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec; +} + +/// Actual implementation of peer reputations and connection candidates provider. +#[derive(Debug, Clone)] +pub struct PeerStoreHandle { + inner: Arc>, +} + +impl PeerStoreProvider for PeerStoreHandle { + fn is_banned(&self, peer_id: &PeerId) -> bool { + self.inner.lock().is_banned(peer_id) + } + + fn register_protocol(&self, protocol_handle: ProtocolHandle) { + self.inner.lock().register_protocol(protocol_handle); + } + + fn report_disconnect(&mut self, peer_id: PeerId) { + self.inner.lock().report_disconnect(peer_id) + } + + fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange) { + self.inner.lock().report_peer(peer_id, change) + } + + fn peer_reputation(&self, peer_id: &PeerId) -> i32 { + self.inner.lock().peer_reputation(peer_id) + } + + fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec { + self.inner.lock().outgoing_candidates(count, ignored) + } +} + +impl PeerStoreHandle { + /// Get the number of known peers. + /// + /// This number might not include some connected peers in rare cases when their reputation + /// was not updated for one hour, because their entries in [`PeerStore`] were dropped. + pub fn num_known_peers(&self) -> usize { + self.inner.lock().peers.len() + } + + /// Add known peer. 
+ pub fn add_known_peer(&mut self, peer_id: PeerId) { + self.inner.lock().add_known_peer(peer_id); + } +} + +#[derive(Debug, Clone, Copy)] +struct PeerInfo { + reputation: i32, + last_updated: Instant, +} + +impl Default for PeerInfo { + fn default() -> Self { + Self { reputation: 0, last_updated: Instant::now() } + } +} + +impl PartialEq for PeerInfo { + fn eq(&self, other: &Self) -> bool { + self.reputation == other.reputation + } +} + +impl Eq for PeerInfo {} + +impl Ord for PeerInfo { + // We define reverse order by reputation values. + fn cmp(&self, other: &Self) -> Ordering { + self.reputation.cmp(&other.reputation).reverse() + } +} + +impl PartialOrd for PeerInfo { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PeerInfo { + fn is_banned(&self) -> bool { + self.reputation < BANNED_THRESHOLD + } + + fn add_reputation(&mut self, increment: i32) { + self.reputation = self.reputation.saturating_add(increment); + self.bump_last_updated(); + } + + fn decay_reputation(&mut self, seconds_passed: u64) { + // Note that decaying the reputation value happens "on its own", + // so we don't do `bump_last_updated()`. 
+ for _ in 0..seconds_passed { + let mut diff = self.reputation / INVERSE_DECREMENT; + if diff == 0 && self.reputation < 0 { + diff = -1; + } else if diff == 0 && self.reputation > 0 { + diff = 1; + } + + self.reputation = self.reputation.saturating_sub(diff); + + if self.reputation == 0 { + break + } + } + } + + fn bump_last_updated(&mut self) { + self.last_updated = Instant::now(); + } +} + +#[derive(Debug)] +struct PeerStoreInner { + peers: HashMap, + protocols: Vec, +} + +impl PeerStoreInner { + fn is_banned(&self, peer_id: &PeerId) -> bool { + self.peers.get(peer_id).map_or(false, |info| info.is_banned()) + } + + fn register_protocol(&mut self, protocol_handle: ProtocolHandle) { + self.protocols.push(protocol_handle); + } + + fn report_disconnect(&mut self, peer_id: PeerId) { + let peer_info = self.peers.entry(peer_id).or_default(); + peer_info.add_reputation(DISCONNECT_REPUTATION_CHANGE); + + log::trace!( + target: LOG_TARGET, + "Peer {} disconnected, reputation: {:+} to {}", + peer_id, + DISCONNECT_REPUTATION_CHANGE, + peer_info.reputation, + ); + } + + fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange) { + let peer_info = self.peers.entry(peer_id).or_default(); + peer_info.add_reputation(change.value); + + if peer_info.reputation < BANNED_THRESHOLD { + self.protocols.iter().for_each(|handle| handle.disconnect_peer(peer_id)); + + log::trace!( + target: LOG_TARGET, + "Report {}: {:+} to {}. Reason: {}. Banned, disconnecting.", + peer_id, + change.value, + peer_info.reputation, + change.reason, + ); + } else { + log::trace!( + target: LOG_TARGET, + "Report {}: {:+} to {}. 
Reason: {}.", + peer_id, + change.value, + peer_info.reputation, + change.reason, + ); + } + } + + fn peer_reputation(&self, peer_id: &PeerId) -> i32 { + self.peers.get(peer_id).map_or(0, |info| info.reputation) + } + + fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec { + let mut candidates = self + .peers + .iter() + .filter_map(|(peer_id, info)| { + (!info.is_banned() && !ignored.contains(peer_id)).then_some((*peer_id, *info)) + }) + .collect::>(); + let count = std::cmp::min(count, candidates.len()); + candidates.partial_sort(count, |(_, info1), (_, info2)| info1.cmp(info2)); + candidates.iter().take(count).map(|(peer_id, _)| *peer_id).collect() + + // TODO: keep the peers sorted (in a "bi-multi-map"?) to not repeat sorting every time. + } + + fn progress_time(&mut self, seconds_passed: u64) { + if seconds_passed == 0 { + return + } + + // Drive reputation values towards 0. + self.peers + .iter_mut() + .for_each(|(_, info)| info.decay_reputation(seconds_passed)); + + // Retain only entries with non-zero reputation values or not expired ones. + let now = Instant::now(); + self.peers + .retain(|_, info| info.reputation != 0 || info.last_updated + FORGET_AFTER > now); + } + + fn add_known_peer(&mut self, peer_id: PeerId) { + match self.peers.entry(peer_id) { + Entry::Occupied(mut e) => { + trace!( + target: LOG_TARGET, + "Trying to add an already known peer {peer_id}, bumping `last_updated`.", + ); + e.get_mut().bump_last_updated(); + }, + Entry::Vacant(e) => { + trace!(target: LOG_TARGET, "Adding a new known peer {peer_id}."); + e.insert(PeerInfo::default()); + }, + } + } +} + +/// Worker part of [`PeerStoreHandle`] +#[derive(Debug)] +pub struct PeerStore { + inner: Arc>, +} + +impl PeerStore { + /// Create a new peer store from the list of bootnodes. 
+ pub fn new(bootnodes: Vec) -> Self { + PeerStore { + inner: Arc::new(Mutex::new(PeerStoreInner { + peers: bootnodes + .into_iter() + .map(|peer_id| (peer_id, PeerInfo::default())) + .collect(), + protocols: Vec::new(), + })), + } + } + + /// Get `PeerStoreHandle`. + pub fn handle(&self) -> PeerStoreHandle { + PeerStoreHandle { inner: self.inner.clone() } + } + + /// Drive the `PeerStore`, decaying reputation values over time and removing expired entries. + pub async fn run(self) { + let started = Instant::now(); + let mut latest_time_update = started; + + loop { + let now = Instant::now(); + // We basically do `(now - self.latest_update).as_secs()`, except that by the way we do + // it we know that we're not going to miss seconds because of rounding to integers. + let seconds_passed = { + let elapsed_latest = latest_time_update - started; + let elapsed_now = now - started; + latest_time_update = now; + elapsed_now.as_secs() - elapsed_latest.as_secs() + }; + + self.inner.lock().progress_time(seconds_passed); + let _ = Delay::new(Duration::from_secs(1)).await; + } + } +} + +#[cfg(test)] +mod tests { + use super::PeerInfo; + + #[test] + fn decaying_zero_reputation_yields_zero() { + let mut peer_info = PeerInfo::default(); + assert_eq!(peer_info.reputation, 0); + + peer_info.decay_reputation(1); + assert_eq!(peer_info.reputation, 0); + + peer_info.decay_reputation(100_000); + assert_eq!(peer_info.reputation, 0); + } + + #[test] + fn decaying_positive_reputation_decreases_it() { + const INITIAL_REPUTATION: i32 = 100; + + let mut peer_info = PeerInfo::default(); + peer_info.reputation = INITIAL_REPUTATION; + + peer_info.decay_reputation(1); + assert!(peer_info.reputation >= 0); + assert!(peer_info.reputation < INITIAL_REPUTATION); + } + + #[test] + fn decaying_negative_reputation_increases_it() { + const INITIAL_REPUTATION: i32 = -100; + + let mut peer_info = PeerInfo::default(); + peer_info.reputation = INITIAL_REPUTATION; + + peer_info.decay_reputation(1); + 
assert!(peer_info.reputation <= 0); + assert!(peer_info.reputation > INITIAL_REPUTATION); + } + + #[test] + fn decaying_max_reputation_finally_yields_zero() { + const INITIAL_REPUTATION: i32 = i32::MAX; + const SECONDS: u64 = 1000; + + let mut peer_info = PeerInfo::default(); + peer_info.reputation = INITIAL_REPUTATION; + + peer_info.decay_reputation(SECONDS / 2); + assert!(peer_info.reputation > 0); + + peer_info.decay_reputation(SECONDS / 2); + assert_eq!(peer_info.reputation, 0); + } + + #[test] + fn decaying_min_reputation_finally_yields_zero() { + const INITIAL_REPUTATION: i32 = i32::MIN; + const SECONDS: u64 = 1000; + + let mut peer_info = PeerInfo::default(); + peer_info.reputation = INITIAL_REPUTATION; + + peer_info.decay_reputation(SECONDS / 2); + assert!(peer_info.reputation < 0); + + peer_info.decay_reputation(SECONDS / 2); + assert_eq!(peer_info.reputation, 0); + } +} diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index e7214d814dda8..9b94f28835284 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -17,8 +17,9 @@ // along with this program. If not, see . use crate::{ - config::{self, NonReservedPeerMode}, - error, + config, error, + peer_store::{PeerStoreHandle, PeerStoreProvider}, + protocol_controller::{self, SetId}, types::ProtocolName, }; @@ -36,7 +37,7 @@ use libp2p::{ use log::{debug, error, warn}; use sc_network_common::{role::Roles, sync::message::BlockAnnouncesHandshake}; -use sc_utils::mpsc::TracingUnboundedSender; +use sc_utils::mpsc::{TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::Block as BlockT; use std::{ @@ -61,13 +62,10 @@ pub mod message; pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = 16 * 1024 * 1024; /// Identifier of the peerset for the block announces protocol. -const HARDCODED_PEERSETS_SYNC: sc_peerset::SetId = sc_peerset::SetId::from(0); -/// Number of hardcoded peersets (the constants right above). 
Any set whose identifier is equal or -/// superior to this value corresponds to a user-defined protocol. -const NUM_HARDCODED_PEERSETS: usize = 1; +const HARDCODED_PEERSETS_SYNC: SetId = SetId::from(0); mod rep { - use sc_peerset::ReputationChange as Rep; + use crate::ReputationChange as Rep; /// We received a message that failed to decode. pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); } @@ -78,7 +76,7 @@ type PendingSyncSubstreamValidation = // Lock must always be taken in order declared here. pub struct Protocol { /// Used to report reputation changes. - peerset_handle: sc_peerset::PeersetHandle, + peer_store_handle: PeerStoreHandle, /// Handles opening the unique substream and sending and receiving raw messages. behaviour: Notifications, /// List of notifications protocols that have been registered. @@ -89,8 +87,8 @@ pub struct Protocol { /// event to the outer layers, we also shouldn't propagate this "substream closed" event. To /// solve this, an entry is added to this map whenever an invalid handshake is received. /// Entries are removed when the corresponding "substream closed" is later received. - bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, - /// Connected peers. + bad_handshake_substreams: HashSet<(PeerId, SetId)>, + /// Connected peers on sync protocol. peers: HashMap, sync_substream_validations: FuturesUnordered, tx: TracingUnboundedSender>, @@ -101,66 +99,17 @@ impl Protocol { /// Create a new instance. 
pub fn new( roles: Roles, - network_config: &config::NetworkConfiguration, notification_protocols: Vec, block_announces_protocol: config::NonDefaultSetConfig, + peer_store_handle: PeerStoreHandle, + protocol_controller_handles: Vec, + from_protocol_controllers: TracingUnboundedReceiver, tx: TracingUnboundedSender>, - ) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { - let mut known_addresses = Vec::new(); - - let (peerset, peerset_handle) = { - let mut sets = - Vec::with_capacity(NUM_HARDCODED_PEERSETS + notification_protocols.len()); - - let mut default_sets_reserved = HashSet::new(); - for reserved in network_config.default_peers_set.reserved_nodes.iter() { - default_sets_reserved.insert(reserved.peer_id); - - if !reserved.multiaddr.is_empty() { - known_addresses.push((reserved.peer_id, reserved.multiaddr.clone())); - } - } - - let mut bootnodes = Vec::with_capacity(network_config.boot_nodes.len()); - for bootnode in network_config.boot_nodes.iter() { - bootnodes.push(bootnode.peer_id); - } - - // Set number 0 is used for block announces. 
- sets.push(sc_peerset::SetConfig { - in_peers: network_config.default_peers_set.in_peers, - out_peers: network_config.default_peers_set.out_peers, - bootnodes, - reserved_nodes: default_sets_reserved.clone(), - reserved_only: network_config.default_peers_set.non_reserved_mode == - NonReservedPeerMode::Deny, - }); - - for set_cfg in ¬ification_protocols { - let mut reserved_nodes = HashSet::new(); - for reserved in set_cfg.set_config.reserved_nodes.iter() { - reserved_nodes.insert(reserved.peer_id); - known_addresses.push((reserved.peer_id, reserved.multiaddr.clone())); - } - - let reserved_only = - set_cfg.set_config.non_reserved_mode == NonReservedPeerMode::Deny; - - sets.push(sc_peerset::SetConfig { - in_peers: set_cfg.set_config.in_peers, - out_peers: set_cfg.set_config.out_peers, - bootnodes: Vec::new(), - reserved_nodes, - reserved_only, - }); - } - - sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets }) - }; - + ) -> error::Result { let behaviour = { Notifications::new( - peerset, + protocol_controller_handles, + from_protocol_controllers, // NOTE: Block announcement protocol is still very much hardcoded into `Protocol`. // This protocol must be the first notification protocol given to // `Notifications` @@ -180,7 +129,7 @@ impl Protocol { }; let protocol = Self { - peerset_handle: peerset_handle.clone(), + peer_store_handle, behaviour, notification_protocols: iter::once(block_announces_protocol.notifications_protocol) .chain(notification_protocols.iter().map(|s| s.notifications_protocol.clone())) @@ -193,7 +142,7 @@ impl Protocol { _marker: Default::default(), }; - Ok((protocol, peerset_handle, known_addresses)) + Ok(protocol) } /// Returns the list of all the peers we have an open channel to. @@ -201,42 +150,28 @@ impl Protocol { self.behaviour.open_peers() } - /// Returns the number of discovered nodes that we keep in memory. 
- pub fn num_discovered_peers(&self) -> usize { - self.behaviour.num_discovered_peers() - } - /// Disconnects the given peer if we are connected to it. pub fn disconnect_peer(&mut self, peer_id: &PeerId, protocol_name: ProtocolName) { if let Some(position) = self.notification_protocols.iter().position(|p| *p == protocol_name) { - self.behaviour.disconnect_peer(peer_id, sc_peerset::SetId::from(position)); - self.peers.remove(peer_id); + // Note: no need to remove a peer from `self.peers` if we are dealing with sync + // protocol, because it will be done when handling + // `NotificationsOut::CustomProtocolClosed`. + self.behaviour.disconnect_peer(peer_id, SetId::from(position)); } else { warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name") } } - /// Returns the state of the peerset manager, for debugging purposes. - pub fn peerset_debug_info(&mut self) -> serde_json::Value { - self.behaviour.peerset_debug_info() - } - - /// Returns the number of peers we're connected to. + /// Returns the number of peers we're connected to on sync protocol. pub fn num_connected_peers(&self) -> usize { self.peers.len() } - /// Adjusts the reputation of a node. - pub fn report_peer(&self, who: PeerId, reputation: sc_peerset::ReputationChange) { - self.peerset_handle.report_peer(who, reputation) - } - /// Set handshake for the notification protocol. pub fn set_notification_handshake(&mut self, protocol: ProtocolName, handshake: Vec) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.behaviour - .set_notif_protocol_handshake(sc_peerset::SetId::from(index), handshake); + self.behaviour.set_notif_protocol_handshake(SetId::from(index), handshake); } else { error!( target: "sub-libp2p", @@ -245,105 +180,6 @@ impl Protocol { ); } } - - /// Set whether the syncing peers set is in reserved-only mode. 
- pub fn set_reserved_only(&self, reserved_only: bool) { - self.peerset_handle.set_reserved_only(HARDCODED_PEERSETS_SYNC, reserved_only); - } - - /// Removes a `PeerId` from the list of reserved peers for syncing purposes. - pub fn remove_reserved_peer(&self, peer: PeerId) { - self.peerset_handle.remove_reserved_peer(HARDCODED_PEERSETS_SYNC, peer); - } - - /// Returns the list of reserved peers. - pub fn reserved_peers(&self) -> impl Iterator { - self.behaviour.reserved_peers(HARDCODED_PEERSETS_SYNC) - } - - /// Adds a `PeerId` to the list of reserved peers for syncing purposes. - pub fn add_reserved_peer(&self, peer: PeerId) { - self.peerset_handle.add_reserved_peer(HARDCODED_PEERSETS_SYNC, peer); - } - - /// Sets the list of reserved peers for syncing purposes. - pub fn set_reserved_peers(&self, peers: HashSet) { - self.peerset_handle.set_reserved_peers(HARDCODED_PEERSETS_SYNC, peers); - } - - /// Sets the list of reserved peers for the given protocol/peerset. - pub fn set_reserved_peerset_peers(&self, protocol: ProtocolName, peers: HashSet) { - if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.set_reserved_peers(sc_peerset::SetId::from(index), peers); - } else { - error!( - target: "sub-libp2p", - "set_reserved_peerset_peers with unknown protocol: {}", - protocol - ); - } - } - - /// Removes a `PeerId` from the list of reserved peers. - pub fn remove_set_reserved_peer(&self, protocol: ProtocolName, peer: PeerId) { - if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.remove_reserved_peer(sc_peerset::SetId::from(index), peer); - } else { - error!( - target: "sub-libp2p", - "remove_set_reserved_peer with unknown protocol: {}", - protocol - ); - } - } - - /// Adds a `PeerId` to the list of reserved peers. 
- pub fn add_set_reserved_peer(&self, protocol: ProtocolName, peer: PeerId) { - if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.add_reserved_peer(sc_peerset::SetId::from(index), peer); - } else { - error!( - target: "sub-libp2p", - "add_set_reserved_peer with unknown protocol: {}", - protocol - ); - } - } - - /// Notify the protocol that we have learned about the existence of nodes on the default set. - /// - /// Can be called multiple times with the same `PeerId`s. - pub fn add_default_set_discovered_nodes(&mut self, peer_ids: impl Iterator) { - for peer_id in peer_ids { - self.peerset_handle.add_to_peers_set(HARDCODED_PEERSETS_SYNC, peer_id); - } - } - - /// Add a peer to a peers set. - pub fn add_to_peers_set(&self, protocol: ProtocolName, peer: PeerId) { - if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.add_to_peers_set(sc_peerset::SetId::from(index), peer); - } else { - error!( - target: "sub-libp2p", - "add_to_peers_set with unknown protocol: {}", - protocol - ); - } - } - - /// Remove a peer from a peers set. - pub fn remove_from_peers_set(&self, protocol: ProtocolName, peer: PeerId) { - if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.remove_from_peers_set(sc_peerset::SetId::from(index), peer); - } else { - error!( - target: "sub-libp2p", - "remove_from_peers_set with unknown protocol: {}", - protocol - ); - } - } } /// Outcome of an incoming custom message. @@ -474,6 +310,7 @@ impl NetworkBehaviour for Protocol { received_handshake, notifications_sink, negotiated_fallback, + inbound, } => { // Set number 0 is hardcoded the default set of peers we sync from. 
if set_id == HARDCODED_PEERSETS_SYNC { @@ -493,6 +330,7 @@ impl NetworkBehaviour for Protocol { let (tx, rx) = oneshot::channel(); let _ = self.tx.unbounded_send( crate::SyncEvent::NotificationStreamOpened { + inbound, remote: peer_id, received_handshake: handshake, sink: notifications_sink, @@ -520,7 +358,7 @@ impl NetworkBehaviour for Protocol { peer_id, msg, ); - self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + self.peer_store_handle.report_peer(peer_id, rep::BAD_MESSAGE); CustomMessageOutcome::None }, Err(err) => { @@ -533,6 +371,7 @@ impl NetworkBehaviour for Protocol { let (tx, rx) = oneshot::channel(); let _ = self.tx.unbounded_send( crate::SyncEvent::NotificationStreamOpened { + inbound, remote: peer_id, received_handshake: handshake, sink: notifications_sink, @@ -561,7 +400,7 @@ impl NetworkBehaviour for Protocol { err, err2, ); - self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + self.peer_store_handle.report_peer(peer_id, rep::BAD_MESSAGE); CustomMessageOutcome::None }, } @@ -598,8 +437,7 @@ impl NetworkBehaviour for Protocol { debug!(target: "sync", "Failed to parse remote handshake: {}", err); self.bad_handshake_substreams.insert((peer_id, set_id)); self.behaviour.disconnect_peer(&peer_id, set_id); - self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); - self.peers.remove(&peer_id); + self.peer_store_handle.report_peer(peer_id, rep::BAD_MESSAGE); CustomMessageOutcome::None }, } diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index 7e56793939b55..89513e004c6df 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -20,6 +20,7 @@ use crate::{ protocol::notifications::handler::{ self, NotificationsSink, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut, }, + protocol_controller::{self, IncomingIndex, Message, SetId}, types::ProtocolName, }; @@ -35,10 +36,10 @@ use libp2p::{ 
}, PeerId, }; -use log::{error, trace, warn}; +use log::{debug, error, info, trace, warn}; use parking_lot::RwLock; use rand::distributions::{Distribution as _, Uniform}; -use sc_peerset::DropReason; +use sc_utils::mpsc::TracingUnboundedReceiver; use smallvec::SmallVec; use std::{ cmp, @@ -84,19 +85,17 @@ use std::{ /// the API of this behaviour and towards the peerset manager is aggregated in /// the following way: /// -/// 1. The enabled/disabled status is the same across all connections, as -/// decided by the peerset manager. -/// 2. `send_packet` and `write_notification` always send all data over -/// the same connection to preserve the ordering provided by the transport, -/// as long as that connection is open. If it closes, a second open -/// connection may take over, if one exists, but that case should be no -/// different than a single connection failing and being re-established -/// in terms of potential reordering and dropped messages. Messages can -/// be received on any connection. -/// 3. The behaviour reports `NotificationsOut::CustomProtocolOpen` when the -/// first connection reports `NotifsHandlerOut::OpenResultOk`. -/// 4. The behaviour reports `NotificationsOut::CustomProtocolClosed` when the -/// last connection reports `NotifsHandlerOut::ClosedResult`. +/// 1. The enabled/disabled status is the same across all connections, as decided by the peerset +/// manager. +/// 2. `send_packet` and `write_notification` always send all data over the same connection to +/// preserve the ordering provided by the transport, as long as that connection is open. If it +/// closes, a second open connection may take over, if one exists, but that case should be no +/// different than a single connection failing and being re-established in terms of potential +/// reordering and dropped messages. Messages can be received on any connection. +/// 3. 
The behaviour reports `NotificationsOut::CustomProtocolOpen` when the first connection +/// reports `NotifsHandlerOut::OpenResultOk`. +/// 4. The behaviour reports `NotificationsOut::CustomProtocolClosed` when the last connection +/// reports `NotifsHandlerOut::ClosedResult`. /// /// In this way, the number of actual established connections to the peer is /// an implementation detail of this behaviour. Note that, in practice and at @@ -107,11 +106,14 @@ pub struct Notifications { /// Notification protocols. Entries never change after initialization. notif_protocols: Vec, + /// Protocol controllers are responsible for peer connections management. + protocol_controller_handles: Vec, + /// Receiver for instructions about who to connect to or disconnect from. - peerset: sc_peerset::Peerset, + from_protocol_controllers: TracingUnboundedReceiver, /// List of peers in our state. - peers: FnvHashMap<(PeerId, sc_peerset::SetId), PeerState>, + peers: FnvHashMap<(PeerId, SetId), PeerState>, /// The elements in `peers` occasionally contain `Delay` objects that we would normally have /// to be polled one by one. In order to avoid doing so, as an optimization, every `Delay` is @@ -120,9 +122,8 @@ pub struct Notifications { /// /// By design, we never remove elements from this list. Elements are removed only when the /// `Delay` triggers. As such, this stream may produce obsolete elements. - delays: stream::FuturesUnordered< - Pin + Send>>, - >, + delays: + stream::FuturesUnordered + Send>>>, /// [`DelayId`] to assign to the next delay. next_delay_id: DelayId, @@ -133,7 +134,7 @@ pub struct Notifications { /// We generate indices to identify incoming connections. This is the next value for the index /// to use when a connection is incoming. - next_incoming_index: sc_peerset::IncomingIndex, + next_incoming_index: IncomingIndex, /// Events to produce from `poll()`. 
events: VecDeque>, @@ -231,6 +232,9 @@ enum PeerState { /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. backoff_until: Option, + /// Incoming index tracking this connection. + incoming_index: IncomingIndex, + /// List of connections with this peer, and their state. connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, @@ -293,12 +297,12 @@ struct IncomingPeer { /// Id of the remote peer of the incoming substream. peer_id: PeerId, /// Id of the set the incoming substream would belong to. - set_id: sc_peerset::SetId, + set_id: SetId, /// If true, this "incoming" still corresponds to an actual connection. If false, then the /// connection corresponding to it has been closed or replaced already. alive: bool, /// Id that the we sent to the peerset. - incoming_id: sc_peerset::IncomingIndex, + incoming_id: IncomingIndex, } /// Event that can be emitted by the `Notifications`. @@ -309,7 +313,7 @@ pub enum NotificationsOut { /// Id of the peer we are connected to. peer_id: PeerId, /// Peerset set ID the substream is tied to. - set_id: sc_peerset::SetId, + set_id: SetId, /// If `Some`, a fallback protocol name has been used rather the main protocol name. /// Always matches one of the fallback names passed at initialization. negotiated_fallback: Option, @@ -318,6 +322,8 @@ pub enum NotificationsOut { received_handshake: Vec, /// Object that permits sending notifications to the peer. notifications_sink: NotificationsSink, + /// Is the connection inbound. + inbound: bool, }, /// The [`NotificationsSink`] object used to send notifications with the given peer must be @@ -329,7 +335,7 @@ pub enum NotificationsOut { /// Id of the peer we are connected to. peer_id: PeerId, /// Peerset set ID the substream is tied to. - set_id: sc_peerset::SetId, + set_id: SetId, /// Replacement for the previous [`NotificationsSink`]. 
notifications_sink: NotificationsSink, }, @@ -340,7 +346,7 @@ pub enum NotificationsOut { /// Id of the peer we were connected to. peer_id: PeerId, /// Peerset set ID the substream was tied to. - set_id: sc_peerset::SetId, + set_id: SetId, }, /// Receives a message on a custom protocol substream. @@ -350,7 +356,7 @@ pub enum NotificationsOut { /// Id of the peer the message came from. peer_id: PeerId, /// Peerset set ID the substream is tied to. - set_id: sc_peerset::SetId, + set_id: SetId, /// Message that has been received. message: BytesMut, }, @@ -359,7 +365,8 @@ pub enum NotificationsOut { impl Notifications { /// Creates a `CustomProtos`. pub fn new( - peerset: sc_peerset::Peerset, + protocol_controller_handles: Vec, + from_protocol_controllers: TracingUnboundedReceiver, notif_protocols: impl Iterator, ) -> Self { let notif_protocols = notif_protocols @@ -375,12 +382,13 @@ impl Notifications { Self { notif_protocols, - peerset, + protocol_controller_handles, + from_protocol_controllers, peers: FnvHashMap::default(), delays: Default::default(), next_delay_id: DelayId(0), incoming: SmallVec::new(), - next_incoming_index: sc_peerset::IncomingIndex(0), + next_incoming_index: IncomingIndex(0), events: VecDeque::new(), } } @@ -388,7 +396,7 @@ impl Notifications { /// Modifies the handshake of the given notifications protocol. pub fn set_notif_protocol_handshake( &mut self, - set_id: sc_peerset::SetId, + set_id: SetId, handshake_message: impl Into>, ) { if let Some(p) = self.notif_protocols.get_mut(usize::from(set_id)) { @@ -399,29 +407,24 @@ impl Notifications { } } - /// Returns the number of discovered nodes that we keep in memory. - pub fn num_discovered_peers(&self) -> usize { - self.peerset.num_discovered_peers() - } - /// Returns the list of all the peers we have an open channel to. 
pub fn open_peers(&self) -> impl Iterator { self.peers.iter().filter(|(_, state)| state.is_open()).map(|((id, _), _)| id) } /// Returns true if we have an open substream to the given peer. - pub fn is_open(&self, peer_id: &PeerId, set_id: sc_peerset::SetId) -> bool { + pub fn is_open(&self, peer_id: &PeerId, set_id: SetId) -> bool { self.peers.get(&(*peer_id, set_id)).map(|p| p.is_open()).unwrap_or(false) } /// Disconnects the given peer if we are connected to it. - pub fn disconnect_peer(&mut self, peer_id: &PeerId, set_id: sc_peerset::SetId) { + pub fn disconnect_peer(&mut self, peer_id: &PeerId, set_id: SetId) { trace!(target: "sub-libp2p", "External API => Disconnect({}, {:?})", peer_id, set_id); self.disconnect_peer_inner(peer_id, set_id); } /// Inner implementation of `disconnect_peer`. - fn disconnect_peer_inner(&mut self, peer_id: &PeerId, set_id: sc_peerset::SetId) { + fn disconnect_peer_inner(&mut self, peer_id: &PeerId, set_id: SetId) { let mut entry = if let Entry::Occupied(entry) = self.peers.entry((*peer_id, set_id)) { entry } else { @@ -438,7 +441,7 @@ impl Notifications { // DisabledPendingEnable => Disabled. PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => { trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); - self.peerset.dropped(set_id, *peer_id, DropReason::Unknown); + self.protocol_controller_handles[usize::from(set_id)].dropped(*peer_id); *entry.into_mut() = PeerState::Disabled { connections, backoff_until: Some(timer_deadline) } }, @@ -448,7 +451,7 @@ impl Notifications { // If relevant, the external API is instantly notified. 
PeerState::Enabled { mut connections } => { trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); - self.peerset.dropped(set_id, *peer_id, DropReason::Unknown); + self.protocol_controller_handles[usize::from(set_id)].dropped(*peer_id); if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", peer_id, set_id); @@ -493,7 +496,7 @@ impl Notifications { // Incoming => Disabled. // Ongoing opening requests from the remote are rejected. - PeerState::Incoming { mut connections, backoff_until } => { + PeerState::Incoming { mut connections, backoff_until, .. } => { let inc = if let Some(inc) = self .incoming .iter_mut() @@ -535,18 +538,8 @@ impl Notifications { } } - /// Returns the list of reserved peers. - pub fn reserved_peers(&self, set_id: sc_peerset::SetId) -> impl Iterator { - self.peerset.reserved_peers(set_id) - } - - /// Returns the state of the peerset manager, for debugging purposes. - pub fn peerset_debug_info(&mut self) -> serde_json::Value { - self.peerset.debug_info() - } - /// Function that is called when the peerset wants us to connect to a peer. - fn peerset_report_connect(&mut self, peer_id: PeerId, set_id: sc_peerset::SetId) { + fn peerset_report_connect(&mut self, peer_id: PeerId, set_id: SetId) { // If `PeerId` is unknown to us, insert an entry, start dialing, and return early. let mut occ_entry = match self.peers.entry((peer_id, set_id)) { Entry::Occupied(entry) => entry, @@ -686,65 +679,34 @@ impl Notifications { }; } }, - - // Incoming => Enabled - PeerState::Incoming { mut connections, .. 
} => { - trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Enabling connections.", - occ_entry.key().0, set_id); - if let Some(inc) = self - .incoming - .iter_mut() - .find(|i| i.peer_id == occ_entry.key().0 && i.set_id == set_id && i.alive) - { - inc.alive = false; - } else { - error!( - target: "sub-libp2p", - "State mismatch in libp2p: no entry in incoming for incoming peer", - ) - } - - debug_assert!(connections - .iter() - .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); - for (connec_id, connec_state) in connections - .iter_mut() - .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) - { - trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", - occ_entry.key(), *connec_id, set_id); - self.events.push_back(ToSwarm::NotifyHandler { - peer_id: occ_entry.key().0, - handler: NotifyHandler::One(*connec_id), - event: NotifsHandlerIn::Open { protocol_index: set_id.into() }, - }); - *connec_state = ConnectionState::Opening; - } - - *occ_entry.into_mut() = PeerState::Enabled { connections }; + // Incoming => Incoming + st @ PeerState::Incoming { .. } => { + debug!( + target: "sub-libp2p", + "PSM => Connect({}, {:?}): Ignoring obsolete connect, we are awaiting accept/reject.", + occ_entry.key().0, set_id + ); + *occ_entry.into_mut() = st; }, // Other states are kept as-is. st @ PeerState::Enabled { .. } => { - warn!(target: "sub-libp2p", + debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Already connected.", occ_entry.key().0, set_id); *occ_entry.into_mut() = st; - debug_assert!(false); }, st @ PeerState::DisabledPendingEnable { .. } => { - warn!(target: "sub-libp2p", + debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Already pending enabling.", occ_entry.key().0, set_id); *occ_entry.into_mut() = st; - debug_assert!(false); }, st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. 
} => { - warn!(target: "sub-libp2p", + debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Duplicate request.", occ_entry.key().0, set_id); *occ_entry.into_mut() = st; - debug_assert!(false); }, PeerState::Poisoned => { @@ -755,7 +717,7 @@ impl Notifications { } /// Function that is called when the peerset wants us to disconnect from a peer. - fn peerset_report_disconnect(&mut self, peer_id: PeerId, set_id: sc_peerset::SetId) { + fn peerset_report_disconnect(&mut self, peer_id: PeerId, set_id: SetId) { let mut entry = match self.peers.entry((peer_id, set_id)) { Entry::Occupied(entry) => entry, Entry::Vacant(entry) => { @@ -847,10 +809,12 @@ impl Notifications { // Invalid state transitions. st @ PeerState::Incoming { .. } => { - error!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not enabled (Incoming).", - entry.key().0, set_id); + info!( + target: "sub-libp2p", + "PSM => Drop({}, {:?}): Ignoring obsolete disconnect, we are awaiting accept/reject.", + entry.key().0, set_id, + ); *entry.into_mut() = st; - debug_assert!(false); }, PeerState::Poisoned => { error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()); @@ -861,7 +825,7 @@ impl Notifications { /// Function that is called when the peerset wants us to accept a connection /// request from a peer. 
- fn peerset_report_accept(&mut self, index: sc_peerset::IncomingIndex) { + fn peerset_report_accept(&mut self, index: IncomingIndex) { let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { self.incoming.remove(pos) @@ -879,7 +843,8 @@ impl Notifications { _ => { trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", incoming.peer_id, incoming.set_id); - self.peerset.dropped(incoming.set_id, incoming.peer_id, DropReason::Unknown); + self.protocol_controller_handles[usize::from(incoming.set_id)] + .dropped(incoming.peer_id); }, } return @@ -895,7 +860,24 @@ impl Notifications { match mem::replace(state, PeerState::Poisoned) { // Incoming => Enabled - PeerState::Incoming { mut connections, .. } => { + PeerState::Incoming { mut connections, incoming_index, .. } => { + if index < incoming_index { + warn!( + target: "sub-libp2p", + "PSM => Accept({:?}, {}, {:?}): Ignoring obsolete incoming index, we are already awaiting {:?}.", + index, incoming.peer_id, incoming.set_id, incoming_index + ); + return + } else if index > incoming_index { + error!( + target: "sub-libp2p", + "PSM => Accept({:?}, {}, {:?}): Ignoring incoming index from the future, we are awaiting {:?}.", + index, incoming.peer_id, incoming.set_id, incoming_index + ); + debug_assert!(false); + return + } + trace!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Enabling connections.", index, incoming.peer_id, incoming.set_id); @@ -930,7 +912,7 @@ impl Notifications { } /// Function that is called when the peerset wants us to reject an incoming peer. 
- fn peerset_report_reject(&mut self, index: sc_peerset::IncomingIndex) { + fn peerset_report_reject(&mut self, index: IncomingIndex) { let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { self.incoming.remove(pos) @@ -955,7 +937,24 @@ impl Notifications { match mem::replace(state, PeerState::Poisoned) { // Incoming => Disabled - PeerState::Incoming { mut connections, backoff_until } => { + PeerState::Incoming { mut connections, backoff_until, incoming_index } => { + if index < incoming_index { + warn!( + target: "sub-libp2p", + "PSM => Reject({:?}, {}, {:?}): Ignoring obsolete incoming index, we are already awaiting {:?}.", + index, incoming.peer_id, incoming.set_id, incoming_index + ); + return + } else if index > incoming_index { + error!( + target: "sub-libp2p", + "PSM => Reject({:?}, {}, {:?}): Ignoring incoming index from the future, we are awaiting {:?}.", + index, incoming.peer_id, incoming.set_id, incoming_index + ); + debug_assert!(false); + return + } + trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Rejecting connections.", index, incoming.peer_id, incoming.set_id); @@ -1047,7 +1046,7 @@ impl NetworkBehaviour for Notifications { connection_id, .. }) => { - for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { + for set_id in (0..self.notif_protocols.len()).map(SetId::from) { match self.peers.entry((peer_id, set_id)).or_insert(PeerState::Poisoned) { // Requested | PendingRequest => Enabled st @ &mut PeerState::Requested | @@ -1101,7 +1100,7 @@ impl NetworkBehaviour for Notifications { } }, FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, connection_id, .. 
}) => { - for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { + for set_id in (0..self.notif_protocols.len()).map(SetId::from) { let mut entry = if let Entry::Occupied(entry) = self.peers.entry((peer_id, set_id)) { @@ -1183,7 +1182,8 @@ impl NetworkBehaviour for Notifications { if connections.is_empty() { trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); - self.peerset.dropped(set_id, peer_id, DropReason::Unknown); + self.protocol_controller_handles[usize::from(set_id)] + .dropped(peer_id); *entry.get_mut() = PeerState::Backoff { timer, timer_deadline }; } else { *entry.get_mut() = PeerState::DisabledPendingEnable { @@ -1195,7 +1195,7 @@ impl NetworkBehaviour for Notifications { }, // Incoming => Incoming | Disabled | Backoff | Ø - PeerState::Incoming { mut connections, backoff_until } => { + PeerState::Incoming { mut connections, backoff_until, incoming_index } => { trace!( target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}, {:?}): OpenDesiredByRemote.", @@ -1223,8 +1223,9 @@ impl NetworkBehaviour for Notifications { // If no connection is `OpenDesiredByRemote` anymore, clean up the // peerset incoming request. if no_desired_left { - // In the incoming state, we don't report "Dropped". Instead we will - // just ignore the corresponding Accept/Reject. + // In the incoming state, we don't report "Dropped" straight away. + // Instead we will report "Dropped" if receive the corresponding + // "Accept". 
if let Some(state) = self .incoming .iter_mut() @@ -1269,8 +1270,11 @@ impl NetworkBehaviour for Notifications { *entry.get_mut() = PeerState::Disabled { connections, backoff_until }; } else { - *entry.get_mut() = - PeerState::Incoming { connections, backoff_until }; + *entry.get_mut() = PeerState::Incoming { + connections, + backoff_until, + incoming_index, + }; } }, @@ -1333,7 +1337,8 @@ impl NetworkBehaviour for Notifications { if connections.is_empty() { trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); - self.peerset.dropped(set_id, peer_id, DropReason::Unknown); + self.protocol_controller_handles[usize::from(set_id)] + .dropped(peer_id); let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); let delay_id = self.next_delay_id; @@ -1355,7 +1360,8 @@ impl NetworkBehaviour for Notifications { matches!(s, ConnectionState::Opening | ConnectionState::Open(_)) }) { trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); - self.peerset.dropped(set_id, peer_id, DropReason::Unknown); + self.protocol_controller_handles[usize::from(set_id)] + .dropped(peer_id); *entry.get_mut() = PeerState::Disabled { connections, backoff_until: None }; @@ -1390,7 +1396,7 @@ impl NetworkBehaviour for Notifications { if let Some(peer_id) = peer_id { trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { + for set_id in (0..self.notif_protocols.len()).map(SetId::from) { if let Entry::Occupied(mut entry) = self.peers.entry((peer_id, set_id)) { match mem::replace(entry.get_mut(), PeerState::Poisoned) { // The peer is not in our list. @@ -1403,7 +1409,8 @@ impl NetworkBehaviour for Notifications { st @ PeerState::Requested | st @ PeerState::PendingRequest { .. 
} => { trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); - self.peerset.dropped(set_id, peer_id, DropReason::Unknown); + self.protocol_controller_handles[usize::from(set_id)] + .dropped(peer_id); let now = Instant::now(); let ban_duration = match st { @@ -1469,7 +1476,7 @@ impl NetworkBehaviour for Notifications { ) { match event { NotifsHandlerOut::OpenDesiredByRemote { protocol_index } => { - let set_id = sc_peerset::SetId::from(protocol_index); + let set_id = SetId::from(protocol_index); trace!(target: "sub-libp2p", "Handler({:?}, {:?}]) => OpenDesiredByRemote({:?})", @@ -1489,7 +1496,7 @@ impl NetworkBehaviour for Notifications { match mem::replace(entry.get_mut(), PeerState::Poisoned) { // Incoming => Incoming - PeerState::Incoming { mut connections, backoff_until } => { + PeerState::Incoming { mut connections, backoff_until, incoming_index } => { debug_assert!(connections .iter() .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); @@ -1517,7 +1524,8 @@ impl NetworkBehaviour for Notifications { debug_assert!(false); } - *entry.into_mut() = PeerState::Incoming { connections, backoff_until }; + *entry.into_mut() = + PeerState::Incoming { connections, backoff_until, incoming_index }; }, PeerState::Enabled { mut connections } => { @@ -1572,9 +1580,10 @@ impl NetworkBehaviour for Notifications { let incoming_id = self.next_incoming_index; self.next_incoming_index.0 += 1; - trace!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", - peer_id, incoming_id); - self.peerset.incoming(set_id, peer_id, incoming_id); + trace!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}, {:?}).", + peer_id, set_id, incoming_id); + self.protocol_controller_handles[usize::from(set_id)] + .incoming_connection(peer_id, incoming_id); self.incoming.push(IncomingPeer { peer_id, set_id, @@ -1582,8 +1591,11 @@ impl NetworkBehaviour for Notifications { incoming_id, }); - *entry.into_mut() = - PeerState::Incoming { connections, backoff_until }; + 
*entry.into_mut() = PeerState::Incoming { + connections, + backoff_until, + incoming_index: incoming_id, + }; } else { // Connections in `OpeningThenClosing` and `Closing` state can be // in a Closed phase, and as such can emit `OpenDesiredByRemote` @@ -1655,7 +1667,7 @@ impl NetworkBehaviour for Notifications { }, NotifsHandlerOut::CloseDesired { protocol_index } => { - let set_id = sc_peerset::SetId::from(protocol_index); + let set_id = SetId::from(protocol_index); trace!(target: "sub-libp2p", "Handler({}, {:?}) => CloseDesired({:?})", @@ -1710,7 +1722,7 @@ impl NetworkBehaviour for Notifications { _ => None, }) { if pos <= replacement_pos { - trace!(target: "sub-libp2p", "External API <= Sink replaced({:?})", peer_id); + trace!(target: "sub-libp2p", "External API <= Sink replaced({:?}, {:?})", peer_id, set_id); let event = NotificationsOut::CustomProtocolReplaced { peer_id, set_id, @@ -1727,7 +1739,8 @@ impl NetworkBehaviour for Notifications { .any(|(_, s)| matches!(s, ConnectionState::Opening)) { trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); - self.peerset.dropped(set_id, peer_id, DropReason::Refused); + self.protocol_controller_handles[usize::from(set_id)] + .dropped(peer_id); *entry.into_mut() = PeerState::Disabled { connections, backoff_until: None }; } else { @@ -1755,7 +1768,7 @@ impl NetworkBehaviour for Notifications { }, NotifsHandlerOut::CloseResult { protocol_index } => { - let set_id = sc_peerset::SetId::from(protocol_index); + let set_id = SetId::from(protocol_index); trace!(target: "sub-libp2p", "Handler({}, {:?}) => CloseResult({:?})", @@ -1792,9 +1805,10 @@ impl NetworkBehaviour for Notifications { negotiated_fallback, received_handshake, notifications_sink, + inbound, .. 
} => { - let set_id = sc_peerset::SetId::from(protocol_index); + let set_id = SetId::from(protocol_index); trace!(target: "sub-libp2p", "Handler({}, {:?}) => OpenResultOk({:?})", peer_id, connection_id, set_id); @@ -1816,6 +1830,7 @@ impl NetworkBehaviour for Notifications { let event = NotificationsOut::CustomProtocolOpen { peer_id, set_id, + inbound, negotiated_fallback, received_handshake, notifications_sink: notifications_sink.clone(), @@ -1860,7 +1875,7 @@ impl NetworkBehaviour for Notifications { }, NotifsHandlerOut::OpenResultErr { protocol_index } => { - let set_id = sc_peerset::SetId::from(protocol_index); + let set_id = SetId::from(protocol_index); trace!(target: "sub-libp2p", "Handler({:?}, {:?}) => OpenResultErr({:?})", peer_id, connection_id, set_id); @@ -1900,8 +1915,8 @@ impl NetworkBehaviour for Notifications { if !connections.iter().any(|(_, s)| { matches!(s, ConnectionState::Opening | ConnectionState::Open(_)) }) { - trace!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(set_id, peer_id, DropReason::Refused); + trace!(target: "sub-libp2p", "PSM <= Dropped({:?}, {:?})", peer_id, set_id); + self.protocol_controller_handles[usize::from(set_id)].dropped(peer_id); let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); *entry.into_mut() = PeerState::Disabled { @@ -1949,7 +1964,7 @@ impl NetworkBehaviour for Notifications { }, NotifsHandlerOut::Notification { protocol_index, message } => { - let set_id = sc_peerset::SetId::from(protocol_index); + let set_id = SetId::from(protocol_index); if self.is_open(&peer_id, set_id) { trace!( target: "sub-libp2p", @@ -1991,24 +2006,26 @@ impl NetworkBehaviour for Notifications { return Poll::Ready(event) } - // Poll for instructions from the peerset. - // Note that the peerset is a *best effort* crate, and we have to use defensive programming. + // Poll for instructions from the protocol controllers. 
loop { - match futures::Stream::poll_next(Pin::new(&mut self.peerset), cx) { - Poll::Ready(Some(sc_peerset::Message::Accept(index))) => { + match futures::Stream::poll_next(Pin::new(&mut self.from_protocol_controllers), cx) { + Poll::Ready(Some(Message::Accept(index))) => { self.peerset_report_accept(index); }, - Poll::Ready(Some(sc_peerset::Message::Reject(index))) => { + Poll::Ready(Some(Message::Reject(index))) => { self.peerset_report_reject(index); }, - Poll::Ready(Some(sc_peerset::Message::Connect { peer_id, set_id, .. })) => { + Poll::Ready(Some(Message::Connect { peer_id, set_id, .. })) => { self.peerset_report_connect(peer_id, set_id); }, - Poll::Ready(Some(sc_peerset::Message::Drop { peer_id, set_id, .. })) => { + Poll::Ready(Some(Message::Drop { peer_id, set_id, .. })) => { self.peerset_report_disconnect(peer_id, set_id); }, Poll::Ready(None) => { - error!(target: "sub-libp2p", "Peerset receiver stream has returned None"); + error!( + target: "sub-libp2p", + "Protocol controllers receiver stream has returned `None`. Ignore this error if the node is shutting down.", + ); break }, Poll::Pending => break, @@ -2027,12 +2044,12 @@ impl NetworkBehaviour for Notifications { match peer_state { PeerState::Backoff { timer, .. } if *timer == delay_id => { - trace!(target: "sub-libp2p", "Libp2p <= Clean up ban of {:?} from the state", peer_id); + trace!(target: "sub-libp2p", "Libp2p <= Clean up ban of {:?} from the state ({:?})", peer_id, set_id); self.peers.remove(&(peer_id, set_id)); }, PeerState::PendingRequest { timer, .. 
} if *timer == delay_id => { - trace!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); + trace!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired ({:?})", peer_id, set_id); self.events.push_back(ToSwarm::Dial { opts: peer_id.into() }); *peer_state = PeerState::Requested; }, @@ -2085,8 +2102,13 @@ impl NetworkBehaviour for Notifications { #[allow(deprecated)] mod tests { use super::*; - use crate::protocol::notifications::handler::tests::*; + use crate::{ + mock::MockPeerStore, + protocol::notifications::handler::tests::*, + protocol_controller::{IncomingIndex, ProtoSetConfig, ProtocolController}, + }; use libp2p::swarm::AddressRecord; + use sc_utils::mpsc::tracing_unbounded; use std::{collections::HashSet, iter}; impl PartialEq for ConnectionState { @@ -2132,24 +2154,26 @@ mod tests { } } - fn development_notifs() -> (Notifications, sc_peerset::PeersetHandle) { - let (peerset, peerset_handle) = { - let mut sets = Vec::with_capacity(1); + fn development_notifs() -> (Notifications, ProtocolController) { + let (to_notifications, from_controller) = + tracing_unbounded("test_controller_to_notifications", 10_000); - sets.push(sc_peerset::SetConfig { + let (handle, controller) = ProtocolController::new( + SetId::from(0), + ProtoSetConfig { in_peers: 25, out_peers: 25, - bootnodes: Vec::new(), reserved_nodes: HashSet::new(), reserved_only: false, - }); - - sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets }) - }; + }, + to_notifications, + Box::new(MockPeerStore {}), + ); ( Notifications::new( - peerset, + vec![handle], + from_controller, iter::once(ProtocolConfig { name: "/foo".into(), fallback_names: Vec::new(), @@ -2157,13 +2181,13 @@ mod tests { max_notification_size: u64::MAX, }), ), - peerset_handle, + controller, ) } #[test] fn update_handshake() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let inner = 
notif.notif_protocols.get_mut(0).unwrap().handshake.read().clone(); assert_eq!(inner, vec![1, 2, 3, 4]); @@ -2178,14 +2202,14 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn update_unknown_handshake() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); notif.set_notif_protocol_handshake(1337.into(), vec![5, 6, 7, 8]); } #[test] fn disconnect_backoff_peer() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); notif.peers.insert( @@ -2202,7 +2226,7 @@ mod tests { #[test] fn disconnect_pending_request() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); notif.peers.insert( @@ -2219,7 +2243,7 @@ mod tests { #[test] fn disconnect_requested_peer() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); notif.peers.insert((peer, 0.into()), PeerState::Requested); @@ -2230,7 +2254,7 @@ mod tests { #[test] fn disconnect_disabled_peer() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); notif.peers.insert( (peer, 0.into()), @@ -2246,7 +2270,7 @@ mod tests { #[test] fn remote_opens_connection_and_substream() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -2279,7 +2303,7 @@ mod tests { NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, ); - if let Some(&PeerState::Incoming { ref connections, backoff_until: None }) = + if let Some(&PeerState::Incoming { ref connections, backoff_until: None, .. 
}) = notif.peers.get(&(peer, 0.into())) { assert_eq!(connections.len(), 1); @@ -2290,13 +2314,13 @@ mod tests { assert!(std::matches!( notif.incoming.pop(), - Some(IncomingPeer { alive: true, incoming_id: sc_peerset::IncomingIndex(0), .. }), + Some(IncomingPeer { alive: true, incoming_id: IncomingIndex(0), .. }), )); } #[tokio::test] - async fn disconnect_remote_substream_before_handled_by_peerset() { - let (mut notif, _peerset) = development_notifs(); + async fn disconnect_remote_substream_before_handled_by_controller() { + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -2332,8 +2356,8 @@ mod tests { #[test] fn peerset_report_connect_backoff() { - let (mut notif, _peerset) = development_notifs(); - let set_id = sc_peerset::SetId::from(0); + let (mut notif, _controller) = development_notifs(); + let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -2397,10 +2421,10 @@ mod tests { #[test] fn peerset_connect_incoming() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -2424,15 +2448,17 @@ mod tests { NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, ); - // attempt to connect to the peer and verify that the peer state is `Enabled` - notif.peerset_report_connect(peer, set_id); + // attempt to connect to the peer and verify that the peer state is `Enabled`; + // we rely on implementation detail that incoming indices are counted from 0 + // to not mock the `Peerset` + notif.peerset_report_accept(IncomingIndex(0)); 
assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); } #[test] fn peerset_disconnect_disable_pending_enable() { - let (mut notif, _peerset) = development_notifs(); - let set_id = sc_peerset::SetId::from(0); + let (mut notif, _controller) = development_notifs(); + let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -2478,10 +2504,10 @@ mod tests { #[test] fn peerset_disconnect_enabled() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -2502,7 +2528,9 @@ mod tests { conn, NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, ); - notif.peerset_report_connect(peer, set_id); + // we rely on the implementation detail that incoming indices are counted from 0 + // to not mock the `Peerset` + notif.peerset_report_accept(IncomingIndex(0)); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); // disconnect peer and verify that the state is `Disabled` @@ -2512,9 +2540,9 @@ mod tests { #[test] fn peerset_disconnect_requested() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); // Set peer into `Requested` state. 
notif.peerset_report_connect(peer, set_id); @@ -2527,8 +2555,8 @@ mod tests { #[test] fn peerset_disconnect_pending_request() { - let (mut notif, _peerset) = development_notifs(); - let set_id = sc_peerset::SetId::from(0); + let (mut notif, _controller) = development_notifs(); + let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -2580,10 +2608,10 @@ mod tests { #[test] fn peerset_accept_peer_not_alive() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -2610,28 +2638,28 @@ mod tests { assert!(std::matches!( notif.incoming[0], - IncomingPeer { alive: true, incoming_id: sc_peerset::IncomingIndex(0), .. }, + IncomingPeer { alive: true, incoming_id: IncomingIndex(0), .. }, )); notif.disconnect_peer(&peer, set_id); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. }))); assert!(std::matches!( notif.incoming[0], - IncomingPeer { alive: false, incoming_id: sc_peerset::IncomingIndex(0), .. }, + IncomingPeer { alive: false, incoming_id: IncomingIndex(0), .. }, )); - notif.peerset_report_accept(sc_peerset::IncomingIndex(0)); + notif.peerset_report_accept(IncomingIndex(0)); assert_eq!(notif.incoming.len(), 0); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(PeerState::Disabled { .. 
}))); } #[test] fn secondary_connection_peer_state_incoming() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -2682,10 +2710,10 @@ mod tests { #[test] fn close_connection_for_disabled_peer() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -2716,10 +2744,10 @@ mod tests { #[test] fn close_connection_for_incoming_peer_one_connection() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -2755,17 +2783,17 @@ mod tests { assert!(notif.peers.get(&(peer, set_id)).is_none()); assert!(std::matches!( notif.incoming[0], - IncomingPeer { alive: false, incoming_id: sc_peerset::IncomingIndex(0), .. }, + IncomingPeer { alive: false, incoming_id: IncomingIndex(0), .. 
}, )); } #[test] fn close_connection_for_incoming_peer_two_connections() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let conn1 = ConnectionId::new_unchecked(1); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -2830,10 +2858,10 @@ mod tests { #[test] fn connection_and_substream_open() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -2859,7 +2887,9 @@ mod tests { ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); - notif.peerset_report_connect(peer, set_id); + // We rely on the implementation detail that incoming indices are counted + // from 0 to not mock the `Peerset`. + notif.peerset_report_accept(IncomingIndex(0)); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. 
}))); // open new substream @@ -2882,11 +2912,11 @@ mod tests { #[test] fn connection_closed_sink_replaced() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -2948,8 +2978,6 @@ mod tests { // check peer information assert_eq!(notif.open_peers().collect::>(), vec![&peer],); - assert_eq!(notif.reserved_peers(set_id).collect::>(), Vec::<&PeerId>::new(),); - assert_eq!(notif.num_discovered_peers(), 0usize); // close the other connection and verify that notification replacement event is emitted notif.on_swarm_event(FromSwarm::ConnectionClosed( @@ -2978,9 +3006,9 @@ mod tests { #[test] fn dial_failure_for_requested_peer() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); // Set peer into `Requested` state. 
notif.peerset_report_connect(peer, set_id); @@ -3001,10 +3029,10 @@ mod tests { #[tokio::test] async fn write_notification() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -3050,8 +3078,8 @@ mod tests { #[test] fn peerset_report_connect_backoff_expired() { - let (mut notif, _peerset) = development_notifs(); - let set_id = sc_peerset::SetId::from(0); + let (mut notif, _controller) = development_notifs(); + let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -3098,9 +3126,9 @@ mod tests { #[test] fn peerset_report_disconnect_disabled() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), @@ -3124,8 +3152,8 @@ mod tests { #[test] fn peerset_report_disconnect_backoff() { - let (mut notif, _peerset) = development_notifs(); - let set_id = sc_peerset::SetId::from(0); + let (mut notif, _controller) = development_notifs(); + let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -3170,8 +3198,8 @@ mod tests { #[test] fn peer_is_backed_off_if_both_connections_get_closed_while_peer_is_disabled_with_back_off() { - let (mut notif, _peerset) = development_notifs(); - let set_id = sc_peerset::SetId::from(0); + let (mut notif, _controller) = development_notifs(); + let set_id = SetId::from(0); let peer = 
PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); @@ -3243,9 +3271,9 @@ mod tests { #[test] fn inject_connection_closed_incoming_with_backoff() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), @@ -3294,11 +3322,11 @@ mod tests { #[test] fn two_connections_inactive_connection_gets_closed_peer_state_is_still_incoming() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -3349,11 +3377,11 @@ mod tests { #[test] fn two_connections_active_connection_gets_closed_peer_state_is_disabled() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -3407,11 +3435,11 @@ mod tests { #[test] fn inject_connection_closed_for_active_connection() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let 
connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -3475,8 +3503,8 @@ mod tests { #[test] fn inject_dial_failure_for_pending_request() { - let (mut notif, _peerset) = development_notifs(); - let set_id = sc_peerset::SetId::from(0); + let (mut notif, _controller) = development_notifs(); + let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -3538,9 +3566,9 @@ mod tests { #[test] fn peerstate_incoming_open_desired_by_remote() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); let connected = ConnectedPoint::Listener { @@ -3592,9 +3620,9 @@ mod tests { #[tokio::test] async fn remove_backoff_peer_after_timeout() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), @@ -3670,10 +3698,10 @@ mod tests { #[tokio::test] async fn reschedule_disabled_pending_enable_when_connection_not_closed() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -3703,7 +3731,9 @@ mod tests { ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); - notif.peerset_report_connect(peer, set_id); + // we rely on the implementation detail that incoming indices are counted from 0 + // to not mock the `Peerset` + notif.peerset_report_accept(IncomingIndex(0)); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); let event = conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]); @@ -3786,10 +3816,10 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn peerset_report_connect_with_enabled_peer() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -3834,11 +3864,10 @@ mod tests { } #[test] - #[should_panic] #[cfg(debug_assertions)] fn peerset_report_connect_with_disabled_pending_enable_peer() { - let (mut notif, _peerset) = development_notifs(); - let set_id = sc_peerset::SetId::from(0); + let (mut notif, _controller) = development_notifs(); + let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -3872,30 +3901,35 @@ mod tests { Some(&PeerState::DisabledPendingEnable { .. }) )); + // duplicate "connect" must not change the state notif.peerset_report_connect(peer, set_id); + assert!(std::matches!( + notif.peers.get(&(peer, set_id)), + Some(&PeerState::DisabledPendingEnable { .. }) + )); } #[test] - #[should_panic] #[cfg(debug_assertions)] fn peerset_report_connect_with_requested_peer() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); // Set peer into `Requested` state. 
notif.peerset_report_connect(peer, set_id); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Requested))); + // Duplicate "connect" must not change the state. notif.peerset_report_connect(peer, set_id); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Requested))); } #[test] - #[should_panic] #[cfg(debug_assertions)] fn peerset_report_connect_with_pending_requested() { - let (mut notif, _peerset) = development_notifs(); - let set_id = sc_peerset::SetId::from(0); + let (mut notif, _controller) = development_notifs(); + let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -3940,16 +3974,20 @@ mod tests { Some(&PeerState::PendingRequest { .. }) )); + // duplicate "connect" must not change the state notif.peerset_report_connect(peer, set_id); + assert!(std::matches!( + notif.peers.get(&(peer, set_id)), + Some(&PeerState::PendingRequest { .. }) + )); } #[test] - #[should_panic] #[cfg(debug_assertions)] - fn peerset_report_disconnect_with_incoming_peer() { - let (mut notif, _peerset) = development_notifs(); + fn peerset_report_connect_with_incoming_peer() { + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), @@ -3973,20 +4011,55 @@ mod tests { conn, NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, ); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); + notif.peerset_report_connect(peer, set_id); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); + } + + #[test] + #[cfg(debug_assertions)] + fn peerset_report_disconnect_with_incoming_peer() { + let (mut notif, _controller) = development_notifs(); + let peer = PeerId::random(); + let set_id = SetId::from(0); + let conn = ConnectionId::new_unchecked(0); + let connected = ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: Multiaddr::empty(), + }; + + notif.on_swarm_event(FromSwarm::ConnectionEstablished( + libp2p::swarm::behaviour::ConnectionEstablished { + peer_id: peer, + connection_id: conn, + endpoint: &connected, + failed_addresses: &[], + other_established: 0usize, + }, + )); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. }))); + + // remote opens a substream, verify that peer state is updated to `Incoming` + notif.on_connection_handler_event( + peer, + conn, + NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); notif.peerset_report_disconnect(peer, set_id); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); } #[test] #[should_panic] #[cfg(debug_assertions)] fn peerset_report_accept_incoming_peer() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -4013,21 +4086,21 @@ mod tests { assert!(std::matches!( notif.incoming[0], - IncomingPeer { alive: true, incoming_id: sc_peerset::IncomingIndex(0), .. }, + IncomingPeer { alive: true, incoming_id: IncomingIndex(0), .. 
}, )); notif.peers.remove(&(peer, set_id)); - notif.peerset_report_accept(sc_peerset::IncomingIndex(0)); + notif.peerset_report_accept(IncomingIndex(0)); } #[test] #[should_panic] #[cfg(debug_assertions)] fn peerset_report_accept_not_incoming_peer() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -4055,7 +4128,7 @@ mod tests { assert!(std::matches!( notif.incoming[0], - IncomingPeer { alive: true, incoming_id: sc_peerset::IncomingIndex(0), .. }, + IncomingPeer { alive: true, incoming_id: IncomingIndex(0), .. }, )); notif.peerset_report_connect(peer, set_id); @@ -4066,14 +4139,14 @@ mod tests { assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. 
}))); notif.incoming[0].alive = true; - notif.peerset_report_accept(sc_peerset::IncomingIndex(0)); + notif.peerset_report_accept(IncomingIndex(0)); } #[test] #[should_panic] #[cfg(debug_assertions)] fn inject_connection_closed_non_existent_peer() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let endpoint = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), @@ -4093,9 +4166,9 @@ mod tests { #[test] fn disconnect_non_existent_peer() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); notif.peerset_report_disconnect(peer, set_id); @@ -4105,7 +4178,7 @@ mod tests { #[test] fn accept_non_existent_connection() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); notif.peerset_report_accept(0.into()); @@ -4115,7 +4188,7 @@ mod tests { #[test] fn reject_non_existent_connection() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); notif.peerset_report_reject(0.into()); @@ -4125,10 +4198,10 @@ mod tests { #[test] fn reject_non_active_connection() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -4163,10 +4236,10 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn reject_non_existent_peer_but_alive_connection() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); 
- let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -4192,7 +4265,7 @@ mod tests { assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); assert!(std::matches!( notif.incoming[0], - IncomingPeer { alive: true, incoming_id: sc_peerset::IncomingIndex(0), .. }, + IncomingPeer { alive: true, incoming_id: IncomingIndex(0), .. }, )); notif.peers.remove(&(peer, set_id)); @@ -4203,10 +4276,10 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_non_existent_connection_closed_for_incoming_peer() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -4246,8 +4319,8 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_non_existent_connection_closed_for_disabled_peer() { - let (mut notif, _peerset) = development_notifs(); - let set_id = sc_peerset::SetId::from(0); + let (mut notif, _controller) = development_notifs(); + let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -4281,8 +4354,8 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_non_existent_connection_closed_for_disabled_pending_enable() { - let (mut notif, _peerset) = development_notifs(); - let set_id = sc_peerset::SetId::from(0); + let (mut notif, _controller) = development_notifs(); + let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -4332,10 +4405,10 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] 
fn inject_connection_closed_for_incoming_peer_state_mismatch() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -4376,10 +4449,10 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_connection_closed_for_enabled_state_mismatch() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); - let set_id = sc_peerset::SetId::from(0); + let set_id = SetId::from(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -4423,8 +4496,8 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_connection_closed_for_backoff_peer() { - let (mut notif, _peerset) = development_notifs(); - let set_id = sc_peerset::SetId::from(0); + let (mut notif, _controller) = development_notifs(); + let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -4477,7 +4550,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn open_result_ok_non_existent_peer() { - let (mut notif, _peerset) = development_notifs(); + let (mut notif, _controller) = development_notifs(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), diff --git a/client/network/src/protocol/notifications/handler.rs b/client/network/src/protocol/notifications/handler.rs index 665b646ecdcfa..cffdec7d71ee4 100644 --- a/client/network/src/protocol/notifications/handler.rs +++ b/client/network/src/protocol/notifications/handler.rs @@ -203,6 +203,8 @@ enum 
State { Opening { /// Substream opened by the remote. If `Some`, has been accepted. in_substream: Option>, + /// Is the connection inbound. + inbound: bool, }, /// Protocol is in the "Open" state. @@ -276,6 +278,8 @@ pub enum NotifsHandlerOut { received_handshake: Vec, /// How notifications can be sent to this node. notifications_sink: NotificationsSink, + /// Is the connection inbound. + inbound: bool, }, /// Acknowledges a [`NotifsHandlerIn::Open`]. The remote has refused the attempt to open @@ -467,7 +471,7 @@ impl ConnectionHandler for NotifsHandler { match event { ConnectionEvent::FullyNegotiatedInbound(inbound) => { let (mut in_substream_open, protocol_index) = inbound.protocol; - let mut protocol_info = &mut self.protocols[protocol_index]; + let protocol_info = &mut self.protocols[protocol_index]; match protocol_info.state { State::Closed { pending_opening } => { @@ -518,7 +522,7 @@ impl ConnectionHandler for NotifsHandler { error!(target: "sub-libp2p", "☎️ State mismatch in notifications handler"); debug_assert!(false); }, - State::Opening { ref mut in_substream } => { + State::Opening { ref mut in_substream, inbound } => { let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); let notifications_sink = NotificationsSink { @@ -543,6 +547,7 @@ impl ConnectionHandler for NotifsHandler { endpoint: self.endpoint.clone(), received_handshake: new_open.handshake, notifications_sink, + inbound, }, )); }, @@ -597,7 +602,7 @@ impl ConnectionHandler for NotifsHandler { ); } - protocol_info.state = State::Opening { in_substream: None }; + protocol_info.state = State::Opening { in_substream: None, inbound: false }; }, State::OpenDesiredByRemote { pending_opening, in_substream } => { let handshake_message = protocol_info.config.handshake.read().clone(); @@ -623,12 +628,13 @@ impl ConnectionHandler for NotifsHandler { // The state change is done in two steps because of borrowing 
issues. let in_substream = match mem::replace( &mut protocol_info.state, - State::Opening { in_substream: None }, + State::Opening { in_substream: None, inbound: false }, ) { State::OpenDesiredByRemote { in_substream, .. } => in_substream, _ => unreachable!(), }; - protocol_info.state = State::Opening { in_substream: Some(in_substream) }; + protocol_info.state = + State::Opening { in_substream: Some(in_substream), inbound: true }; }, State::Opening { .. } | State::Open { .. } => { // As documented, it is forbidden to send an `Open` while there is already @@ -772,7 +778,7 @@ impl ConnectionHandler for NotifsHandler { match &mut self.protocols[protocol_index].state { State::Closed { .. } | State::Open { in_substream: None, .. } | - State::Opening { in_substream: None } => {}, + State::Opening { in_substream: None, .. } => {}, State::Open { in_substream: in_substream @ Some(_), .. } => match Stream::poll_next(Pin::new(in_substream.as_mut().unwrap()), cx) { @@ -893,6 +899,7 @@ pub mod tests { endpoint, received_handshake, notifications_sink, + inbound: false, } } @@ -1131,7 +1138,7 @@ pub mod tests { handler.on_behaviour_event(NotifsHandlerIn::Open { protocol_index: 0 }); assert!(std::matches!( handler.protocols[0].state, - State::Opening { in_substream: Some(_) } + State::Opening { in_substream: Some(_), .. } )); // remote now tries to open another substream, verify that it is rejected and closed @@ -1168,7 +1175,7 @@ pub mod tests { .await; assert!(std::matches!( handler.protocols[0].state, - State::Opening { in_substream: Some(_) } + State::Opening { in_substream: Some(_), .. } )); } @@ -1204,7 +1211,7 @@ pub mod tests { handler.on_behaviour_event(NotifsHandlerIn::Open { protocol_index: 0 }); assert!(std::matches!( handler.protocols[0].state, - State::Opening { in_substream: Some(_) } + State::Opening { in_substream: Some(_), .. 
} )); // accept the substream and move its state to `Open` @@ -1295,7 +1302,7 @@ pub mod tests { handler.on_behaviour_event(NotifsHandlerIn::Open { protocol_index: 0 }); assert!(std::matches!( handler.protocols[0].state, - State::Opening { in_substream: Some(_) } + State::Opening { in_substream: Some(_), .. } )); handler.on_behaviour_event(NotifsHandlerIn::Close { protocol_index: 0 }); @@ -1355,7 +1362,7 @@ pub mod tests { handler.on_behaviour_event(NotifsHandlerIn::Open { protocol_index: 0 }); assert!(std::matches!( handler.protocols[0].state, - State::Opening { in_substream: Some(_) } + State::Opening { in_substream: Some(_), .. } )); handler.on_behaviour_event(NotifsHandlerIn::Close { protocol_index: 0 }); @@ -1438,7 +1445,7 @@ pub mod tests { handler.on_behaviour_event(NotifsHandlerIn::Open { protocol_index: 0 }); assert!(std::matches!( handler.protocols[0].state, - State::Opening { in_substream: Some(_) } + State::Opening { in_substream: Some(_), .. } )); handler.on_behaviour_event(NotifsHandlerIn::Close { protocol_index: 0 }); @@ -1487,7 +1494,7 @@ pub mod tests { handler.on_behaviour_event(NotifsHandlerIn::Open { protocol_index: 0 }); assert!(std::matches!( handler.protocols[0].state, - State::Opening { in_substream: Some(_) } + State::Opening { in_substream: Some(_), .. 
} )); handler.on_behaviour_event(NotifsHandlerIn::Close { protocol_index: 0 }); diff --git a/client/network/src/protocol/notifications/tests.rs b/client/network/src/protocol/notifications/tests.rs index d13a4fcfa3809..d57c24144f571 100644 --- a/client/network/src/protocol/notifications/tests.rs +++ b/client/network/src/protocol/notifications/tests.rs @@ -18,9 +18,13 @@ #![cfg(test)] -use crate::protocol::notifications::{Notifications, NotificationsOut, ProtocolConfig}; +use crate::{ + peer_store::PeerStore, + protocol::notifications::{Notifications, NotificationsOut, ProtocolConfig}, + protocol_controller::{ProtoSetConfig, ProtocolController, SetId}, +}; -use futures::prelude::*; +use futures::{future::BoxFuture, prelude::*}; use libp2p::{ core::{transport::MemoryTransport, upgrade, Endpoint}, identity, noise, @@ -31,6 +35,7 @@ use libp2p::{ }, yamux, Multiaddr, PeerId, Transport, }; +use sc_utils::mpsc::tracing_unbounded; use std::{ iter, pin::Pin, @@ -65,23 +70,31 @@ fn build_nodes() -> (Swarm, Swarm) { .timeout(Duration::from_secs(20)) .boxed(); - let (peerset, _) = sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { - sets: vec![sc_peerset::SetConfig { + let peer_store = PeerStore::new(if index == 0 { + keypairs.iter().skip(1).map(|keypair| keypair.public().to_peer_id()).collect() + } else { + vec![] + }); + + let (to_notifications, from_controller) = + tracing_unbounded("test_protocol_controller_to_notifications", 10_000); + + let (controller_handle, controller) = ProtocolController::new( + SetId::from(0), + ProtoSetConfig { in_peers: 25, out_peers: 25, - bootnodes: if index == 0 { - keypairs.iter().skip(1).map(|keypair| keypair.public().to_peer_id()).collect() - } else { - vec![] - }, reserved_nodes: Default::default(), reserved_only: false, - }], - }); + }, + to_notifications, + Box::new(peer_store.handle()), + ); let behaviour = CustomProtoWithAddr { inner: Notifications::new( - peerset, + vec![controller_handle], + from_controller, 
iter::once(ProtocolConfig { name: "/foo".into(), fallback_names: Vec::new(), @@ -89,6 +102,8 @@ fn build_nodes() -> (Swarm, Swarm) { max_notification_size: 1024 * 1024, }), ), + peer_store_future: peer_store.run().boxed(), + protocol_controller_future: controller.run().boxed(), addrs: addrs .iter() .enumerate() @@ -124,6 +139,8 @@ fn build_nodes() -> (Swarm, Swarm) { /// Wraps around the `CustomBehaviour` network behaviour, and adds hardcoded node addresses to it. struct CustomProtoWithAddr { inner: Notifications, + peer_store_future: BoxFuture<'static, ()>, + protocol_controller_future: BoxFuture<'static, ()>, addrs: Vec<(PeerId, Multiaddr)>, } @@ -222,6 +239,8 @@ impl NetworkBehaviour for CustomProtoWithAddr { cx: &mut Context, params: &mut impl PollParameters, ) -> Poll>> { + let _ = self.peer_store_future.poll_unpin(cx); + let _ = self.protocol_controller_future.poll_unpin(cx); self.inner.poll(cx, params) } } @@ -264,10 +283,9 @@ fn reconnect_after_disconnect() { ServiceState::NotConnected => { service1_state = ServiceState::FirstConnec; if service2_state == ServiceState::FirstConnec { - service1.behaviour_mut().disconnect_peer( - Swarm::local_peer_id(&service2), - sc_peerset::SetId::from(0), - ); + service1 + .behaviour_mut() + .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); } }, ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain, @@ -287,10 +305,9 @@ fn reconnect_after_disconnect() { ServiceState::NotConnected => { service2_state = ServiceState::FirstConnec; if service1_state == ServiceState::FirstConnec { - service1.behaviour_mut().disconnect_peer( - Swarm::local_peer_id(&service2), - sc_peerset::SetId::from(0), - ); + service1 + .behaviour_mut() + .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); } }, ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain, @@ -307,8 +324,20 @@ fn reconnect_after_disconnect() { _ => {}, } + // Due to the bug in `Notifications`, the disconnected 
node does not always detect that + // it was disconnected. The closed inbound substream is tolerated by design, and the + // closed outbound substream is not detected until something is sent into it. + // See [PR #13396](https://github.com/paritytech/substrate/pull/13396). + // This happens if the disconnecting node reconnects to it fast enough. + // In this case the disconnected node does not transit via `ServiceState::NotConnected` + // and stays in `ServiceState::FirstConnec`. + // TODO: update this once the fix is finally merged. if service1_state == ServiceState::ConnectedAgain && - service2_state == ServiceState::ConnectedAgain + service2_state == ServiceState::ConnectedAgain || + service1_state == ServiceState::ConnectedAgain && + service2_state == ServiceState::FirstConnec || + service1_state == ServiceState::FirstConnec && + service2_state == ServiceState::ConnectedAgain { break } diff --git a/client/network/src/protocol_controller.rs b/client/network/src/protocol_controller.rs new file mode 100644 index 0000000000000..c9baa0a77d4ba --- /dev/null +++ b/client/network/src/protocol_controller.rs @@ -0,0 +1,2018 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Protocol Controller. 
Generic implementation of peer management for protocols. +//! Responsible for accepting/rejecting incoming connections and initiating outgoing connections, +//! respecting the inbound and outbound peer slot counts. Communicates with `PeerStore` to get and +//! update peer reputation values and sends commands to `Notifications`. +//! +//! Due to asynchronous nature of communication between `ProtocolController` and `Notifications`, +//! `ProtocolController` has an imperfect view of the states of the peers. To reduce this +//! desynchronization, the following measures are taken: +//! +//! 1. Network peer events from `Notifications` are prioritized over actions from external API and +//! internal actions by `ProtocolController` (like slot allocation). +//! 2. `Notifications` ignores all commands from `ProtocolController` after sending "incoming" +//! request until receiving the answer to this "incoming" request. +//! 3. After sending a "connect" message, `ProtocolController` switches the state of the peer from +//! `Outbound` to `Inbound` if it receives an "incoming" request from `Notifications` for this +//! peer. +//! +//! These measures do not eliminate confusing commands from `ProtocolController` completely, +//! so `Notifications` must correctly handle seemingly inconsistent commands, like a "connect" +//! command for the peer it thinks is already connected, and a "drop" command for a peer that +//! was previously dropped. +//! +//! Even though this does not guarantee that `ProtocolController` and `Notifications` have the same +//! view of the peers' states at any given moment, the eventual consistency is maintained. 
+ +use futures::{channel::oneshot, future::Either, FutureExt, StreamExt}; +use libp2p::PeerId; +use log::{debug, error, trace, warn}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_arithmetic::traits::SaturatedConversion; +use std::{ + collections::{HashMap, HashSet}, + time::{Duration, Instant}, +}; +use wasm_timer::Delay; + +use crate::peer_store::PeerStoreProvider; + +/// Log target for this file. +pub const LOG_TARGET: &str = "peerset"; + +/// `Notifications` protocol index. For historical reasons it's called `SetId`, because it +/// used to refer to a set of peers in a peerset for this protocol. +/// +/// Can be constructed using the `From` trait implementation based on the index of the +/// protocol in `Notifications`. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct SetId(usize); + +impl SetId { + /// Const conversion function for initialization of hardcoded peerset indices. + pub const fn from(id: usize) -> Self { + Self(id) + } +} + +impl From for SetId { + fn from(id: usize) -> Self { + Self(id) + } +} + +impl From for usize { + fn from(id: SetId) -> Self { + id.0 + } +} + +/// Configuration for a set of nodes for a specific protocol. +#[derive(Debug)] +pub struct ProtoSetConfig { + /// Maximum number of incoming links to peers. + pub in_peers: u32, + + /// Maximum number of outgoing links to peers. + pub out_peers: u32, + + /// Lists of nodes we should always be connected to. + /// + /// > **Note**: Keep in mind that the networking has to know an address for these nodes, + /// > otherwise it will not be able to connect to them. + pub reserved_nodes: HashSet, + + /// If true, we only accept nodes in [`ProtoSetConfig::reserved_nodes`]. + pub reserved_only: bool, +} + +/// Message that is sent by [`ProtocolController`] to `Notifications`. +#[derive(Debug, PartialEq)] +pub enum Message { + /// Request to open a connection to the given peer. 
From the point of view of the + /// `ProtocolController`, we are immediately connected. + Connect { + /// Set id to connect on. + set_id: SetId, + /// Peer to connect to. + peer_id: PeerId, + }, + + /// Drop the connection to the given peer, or cancel the connection attempt after a `Connect`. + Drop { + /// Set id to disconnect on. + set_id: SetId, + /// Peer to disconnect from. + peer_id: PeerId, + }, + + /// Equivalent to `Connect` for the peer corresponding to this incoming index. + Accept(IncomingIndex), + + /// Equivalent to `Drop` for the peer corresponding to this incoming index. + Reject(IncomingIndex), +} + +/// Opaque identifier for an incoming connection. Allocated by the network. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct IncomingIndex(pub u64); + +impl From for IncomingIndex { + fn from(val: u64) -> Self { + Self(val) + } +} + +/// External API actions. +#[derive(Debug)] +enum Action { + /// Add a reserved peer or mark already connected peer as reserved. + AddReservedPeer(PeerId), + /// Remove a reserved peer. + RemoveReservedPeer(PeerId), + /// Update reserved peers to match the provided set. + SetReservedPeers(HashSet), + /// Set/unset reserved-only mode. + SetReservedOnly(bool), + /// Disconnect a peer. + DisconnectPeer(PeerId), + /// Get the list of reserved peers. + GetReservedPeers(oneshot::Sender>), +} + +/// Network events from `Notifications`. +#[derive(Debug)] +enum Event { + /// Incoming connection from the peer. + IncomingConnection(PeerId, IncomingIndex), + /// Connection with the peer dropped. + Dropped(PeerId), +} + +/// Shared handle to [`ProtocolController`]. Distributed around the code outside of the +/// protocol implementation. +#[derive(Debug, Clone)] +pub struct ProtocolHandle { + /// Actions from outer API. + actions_tx: TracingUnboundedSender, + /// Connection events from `Notifications`. We prioritize them over actions. 
+ events_tx: TracingUnboundedSender, +} + +impl ProtocolHandle { + /// Adds a new reserved peer. [`ProtocolController`] will make an effort + /// to always remain connected to this peer. + /// + /// Has no effect if the node was already a reserved peer. + /// + /// > **Note**: Keep in mind that the networking has to know an address for this node, + /// > otherwise it will not be able to connect to it. + pub fn add_reserved_peer(&self, peer_id: PeerId) { + let _ = self.actions_tx.unbounded_send(Action::AddReservedPeer(peer_id)); + } + + /// Demotes reserved peer to non-reserved. Does not disconnect the peer. + /// + /// Has no effect if the node was not a reserved peer. + pub fn remove_reserved_peer(&self, peer_id: PeerId) { + let _ = self.actions_tx.unbounded_send(Action::RemoveReservedPeer(peer_id)); + } + + /// Set reserved peers to the new set. + pub fn set_reserved_peers(&self, peer_ids: HashSet) { + let _ = self.actions_tx.unbounded_send(Action::SetReservedPeers(peer_ids)); + } + + /// Sets whether or not [`ProtocolController`] only has connections with nodes marked + /// as reserved for the given set. + pub fn set_reserved_only(&self, reserved: bool) { + let _ = self.actions_tx.unbounded_send(Action::SetReservedOnly(reserved)); + } + + /// Disconnect peer. You should remove the `PeerId` from the `PeerStore` first + /// to not connect to the peer again during the next slot allocation. + pub fn disconnect_peer(&self, peer_id: PeerId) { + let _ = self.actions_tx.unbounded_send(Action::DisconnectPeer(peer_id)); + } + + /// Get the list of reserved peers. + pub fn reserved_peers(&self, pending_response: oneshot::Sender>) { + let _ = self.actions_tx.unbounded_send(Action::GetReservedPeers(pending_response)); + } + + /// Notify about incoming connection. [`ProtocolController`] will either accept or reject it. 
+ pub fn incoming_connection(&self, peer_id: PeerId, incoming_index: IncomingIndex) { + let _ = self + .events_tx + .unbounded_send(Event::IncomingConnection(peer_id, incoming_index)); + } + + /// Notify that connection was dropped (either refused or disconnected). + pub fn dropped(&self, peer_id: PeerId) { + let _ = self.events_tx.unbounded_send(Event::Dropped(peer_id)); + } +} + +/// Direction of a connection +#[derive(Clone, Copy, Debug)] +enum Direction { + Inbound, + Outbound, +} + +/// Status of a connection with a peer. +#[derive(Clone, Debug)] +enum PeerState { + /// We are connected to the peer. + Connected(Direction), + /// We are not connected. + NotConnected, +} + +impl PeerState { + /// Returns true if we are connected with the node. + fn is_connected(&self) -> bool { + matches!(self, PeerState::Connected(_)) + } +} + +impl Default for PeerState { + fn default() -> PeerState { + PeerState::NotConnected + } +} + +/// Worker side of [`ProtocolHandle`] responsible for all the logic. +#[derive(Debug)] +pub struct ProtocolController { + /// Set id to use when sending connect/drop requests to `Notifications`. + // Will likely be replaced by `ProtocolName` in the future. + set_id: SetId, + /// Receiver for outer API messages from [`ProtocolHandle`]. + actions_rx: TracingUnboundedReceiver, + /// Receiver for connection events from `Notifications` sent via [`ProtocolHandle`]. + events_rx: TracingUnboundedReceiver, + /// Number of occupied slots for incoming connections (not counting reserved nodes). + num_in: u32, + /// Number of occupied slots for outgoing connections (not counting reserved nodes). + num_out: u32, + /// Maximum number of slots for incoming connections (not counting reserved nodes). + max_in: u32, + /// Maximum number of slots for outgoing connections (not counting reserved nodes). + max_out: u32, + /// Connected regular nodes. + nodes: HashMap, + /// Reserved nodes. Should be always connected and do not occupy peer slots. 
+ reserved_nodes: HashMap, + /// Connect only to reserved nodes. + reserved_only: bool, + /// Next time to allocate slots. This is done once per second. + next_periodic_alloc_slots: Instant, + /// Outgoing channel for messages to `Notifications`. + to_notifications: TracingUnboundedSender, + /// `PeerStore` handle for checking peer reputation values and getting connection candidates + /// with highest reputation. + peer_store: Box, +} + +impl ProtocolController { + /// Construct new [`ProtocolController`]. + pub fn new( + set_id: SetId, + config: ProtoSetConfig, + to_notifications: TracingUnboundedSender, + peer_store: Box, + ) -> (ProtocolHandle, ProtocolController) { + let (actions_tx, actions_rx) = tracing_unbounded("mpsc_api_protocol", 10_000); + let (events_tx, events_rx) = tracing_unbounded("mpsc_notifications_protocol", 10_000); + let handle = ProtocolHandle { actions_tx, events_tx }; + peer_store.register_protocol(handle.clone()); + let reserved_nodes = + config.reserved_nodes.iter().map(|p| (*p, PeerState::NotConnected)).collect(); + let controller = ProtocolController { + set_id, + actions_rx, + events_rx, + num_in: 0, + num_out: 0, + max_in: config.in_peers, + max_out: config.out_peers, + nodes: HashMap::new(), + reserved_nodes, + reserved_only: config.reserved_only, + next_periodic_alloc_slots: Instant::now(), + to_notifications, + peer_store, + }; + (handle, controller) + } + + /// Drive [`ProtocolController`]. This function returns when all instances of + /// [`ProtocolHandle`] are dropped. + pub async fn run(mut self) { + while self.next_action().await {} + } + + /// Perform one action. Returns `true` if it should be called again. + /// + /// Intended for tests only. Use `run` for driving [`ProtocolController`]. + pub async fn next_action(&mut self) -> bool { + let either = loop { + let mut next_alloc_slots = Delay::new_at(self.next_periodic_alloc_slots).fuse(); + + // See the module doc for why we use `select_biased!`. + futures::select_biased! 
{ + event = self.events_rx.next() => match event { + Some(event) => break Either::Left(event), + None => return false, + }, + action = self.actions_rx.next() => match action { + Some(action) => break Either::Right(action), + None => return false, + }, + _ = next_alloc_slots => { + self.alloc_slots(); + self.next_periodic_alloc_slots = Instant::now() + Duration::new(1, 0); + }, + } + }; + + match either { + Either::Left(event) => self.process_event(event), + Either::Right(action) => self.process_action(action), + } + + true + } + + /// Process connection event. + fn process_event(&mut self, event: Event) { + match event { + Event::IncomingConnection(peer_id, index) => + self.on_incoming_connection(peer_id, index), + Event::Dropped(peer_id) => self.on_peer_dropped(peer_id), + } + } + + /// Process action command. + fn process_action(&mut self, action: Action) { + match action { + Action::AddReservedPeer(peer_id) => self.on_add_reserved_peer(peer_id), + Action::RemoveReservedPeer(peer_id) => self.on_remove_reserved_peer(peer_id), + Action::SetReservedPeers(peer_ids) => self.on_set_reserved_peers(peer_ids), + Action::SetReservedOnly(reserved_only) => self.on_set_reserved_only(reserved_only), + Action::DisconnectPeer(peer_id) => self.on_disconnect_peer(peer_id), + Action::GetReservedPeers(pending_response) => + self.on_get_reserved_peers(pending_response), + } + } + + /// Send "accept" message to `Notifications`. + fn accept_connection(&mut self, peer_id: PeerId, incoming_index: IncomingIndex) { + trace!( + target: LOG_TARGET, + "Accepting {peer_id} ({incoming_index:?}) on {:?} ({}/{} num_in/max_in).", + self.set_id, + self.num_in, + self.max_in, + ); + + let _ = self.to_notifications.unbounded_send(Message::Accept(incoming_index)); + } + + /// Send "reject" message to `Notifications`. 
+ fn reject_connection(&mut self, peer_id: PeerId, incoming_index: IncomingIndex) { + trace!( + target: LOG_TARGET, + "Rejecting {peer_id} ({incoming_index:?}) on {:?} ({}/{} num_in/max_in).", + self.set_id, + self.num_in, + self.max_in, + ); + + let _ = self.to_notifications.unbounded_send(Message::Reject(incoming_index)); + } + + /// Send "connect" message to `Notifications`. + fn start_connection(&mut self, peer_id: PeerId) { + trace!( + target: LOG_TARGET, + "Connecting to {peer_id} on {:?} ({}/{} num_out/max_out).", + self.set_id, + self.num_out, + self.max_out, + ); + + let _ = self + .to_notifications + .unbounded_send(Message::Connect { set_id: self.set_id, peer_id }); + } + + /// Send "drop" message to `Notifications`. + fn drop_connection(&mut self, peer_id: PeerId) { + trace!( + target: LOG_TARGET, + "Dropping {peer_id} on {:?} ({}/{} num_in/max_in, {}/{} num_out/max_out).", + self.set_id, + self.num_in, + self.max_in, + self.num_out, + self.max_out, + ); + + let _ = self + .to_notifications + .unbounded_send(Message::Drop { set_id: self.set_id, peer_id }); + } + + /// Report peer disconnect event to `PeerStore` for it to update peer's reputation accordingly. + /// Should only be called if the remote node disconnected us, not the other way around. + fn report_disconnect(&mut self, peer_id: PeerId) { + self.peer_store.report_disconnect(peer_id); + } + + /// Ask `Peerset` if the peer has a reputation value not sufficent for connection with it. + fn is_banned(&self, peer_id: &PeerId) -> bool { + self.peer_store.is_banned(peer_id) + } + + /// Add the peer to the set of reserved peers. [`ProtocolController`] will try to always + /// maintain connections with such peers. 
+ fn on_add_reserved_peer(&mut self, peer_id: PeerId) { + if self.reserved_nodes.contains_key(&peer_id) { + warn!( + target: LOG_TARGET, + "Trying to add an already reserved node {peer_id} as reserved on {:?}.", + self.set_id, + ); + return + } + + // Get the peer out of non-reserved peers if it's there. + let state = match self.nodes.remove(&peer_id) { + Some(direction) => { + trace!( + target: LOG_TARGET, + "Marking previously connected node {} ({:?}) as reserved on {:?}.", + peer_id, + direction, + self.set_id + ); + PeerState::Connected(direction) + }, + None => { + trace!(target: LOG_TARGET, "Adding reserved node {peer_id} on {:?}.", self.set_id,); + PeerState::NotConnected + }, + }; + + self.reserved_nodes.insert(peer_id, state.clone()); + + // Discount occupied slots or connect to the node. + match state { + PeerState::Connected(Direction::Inbound) => self.num_in -= 1, + PeerState::Connected(Direction::Outbound) => self.num_out -= 1, + PeerState::NotConnected => self.alloc_slots(), + } + } + + /// Remove the peer from the set of reserved peers. The peer is moved to the set of regular + /// nodes. + fn on_remove_reserved_peer(&mut self, peer_id: PeerId) { + let state = match self.reserved_nodes.remove(&peer_id) { + Some(state) => state, + None => { + warn!( + target: LOG_TARGET, + "Trying to remove unknown reserved node {peer_id} from {:?}.", self.set_id, + ); + return + }, + }; + + if let PeerState::Connected(direction) = state { + if self.reserved_only { + // Disconnect the node. + trace!( + target: LOG_TARGET, + "Disconnecting previously reserved node {peer_id} ({direction:?}) on {:?}.", + self.set_id, + ); + self.drop_connection(peer_id); + } else { + // Count connections as of regular node. 
+ trace!( + target: LOG_TARGET, + "Making a connected reserved node {peer_id} ({:?}) on {:?} a regular one.", + direction, + self.set_id, + ); + + match direction { + Direction::Inbound => self.num_in += 1, + Direction::Outbound => self.num_out += 1, + } + + // Put the node into the list of regular nodes. + let prev = self.nodes.insert(peer_id, direction); + assert!(prev.is_none(), "Corrupted state: reserved node was also non-reserved."); + } + } else { + trace!( + target: LOG_TARGET, + "Removed disconnected reserved node {peer_id} from {:?}.", + self.set_id, + ); + } + } + + /// Replace the set of reserved peers. + fn on_set_reserved_peers(&mut self, peer_ids: HashSet) { + // Determine the difference between the current group and the new list. + let current = self.reserved_nodes.keys().cloned().collect(); + let to_insert = peer_ids.difference(¤t).cloned().collect::>(); + let to_remove = current.difference(&peer_ids).cloned().collect::>(); + + for node in to_insert { + self.on_add_reserved_peer(node); + } + + for node in to_remove { + self.on_remove_reserved_peer(node); + } + } + + /// Change "reserved only" flag. In "reserved only" mode we connect and accept connections to + /// reserved nodes only. + fn on_set_reserved_only(&mut self, reserved_only: bool) { + trace!(target: LOG_TARGET, "Set reserved only to `{reserved_only}` on {:?}", self.set_id); + + self.reserved_only = reserved_only; + + if !reserved_only { + return self.alloc_slots() + } + + // Disconnect all non-reserved peers. + self.nodes + .iter() + .map(|(k, v)| (*k, *v)) + .collect::>() + .iter() + .for_each(|(peer_id, direction)| { + // Update counters in the loop for `drop_connection` to report the correct number. + match direction { + Direction::Inbound => self.num_in -= 1, + Direction::Outbound => self.num_out -= 1, + } + self.drop_connection(*peer_id) + }); + self.nodes.clear(); + } + + /// Get the list of reserved peers. 
+ fn on_get_reserved_peers(&self, pending_response: oneshot::Sender>) { + let _ = pending_response.send(self.reserved_nodes.keys().cloned().collect()); + } + + /// Disconnect the peer. + fn on_disconnect_peer(&mut self, peer_id: PeerId) { + // Don't do anything if the node is reserved. + if self.reserved_nodes.contains_key(&peer_id) { + debug!( + target: LOG_TARGET, + "Ignoring request to disconnect reserved peer {peer_id} from {:?}.", self.set_id, + ); + return + } + + match self.nodes.remove(&peer_id) { + Some(direction) => { + trace!( + target: LOG_TARGET, + "Disconnecting peer {peer_id} ({direction:?}) from {:?}.", + self.set_id + ); + match direction { + Direction::Inbound => self.num_in -= 1, + Direction::Outbound => self.num_out -= 1, + } + self.drop_connection(peer_id); + }, + None => { + debug!( + target: LOG_TARGET, + "Trying to disconnect unknown peer {peer_id} from {:?}.", self.set_id, + ); + }, + } + } + + /// Indicate that we received an incoming connection. Must be answered either with + /// a corresponding `Accept` or `Reject`, except if we were already connected to this peer. + /// + /// Note that this mechanism is orthogonal to `Connect`/`Drop`. Accepting an incoming + /// connection implicitly means `Connect`, but incoming connections aren't cancelled by + /// `dropped`. + // Implementation note: because of concurrency issues, `ProtocolController` has an imperfect + // view of the peers' states, and may issue commands for a peer after `Notifications` received + // an incoming request for that peer. In this case, `Notifications` ignores all the commands + // until it receives a response for the incoming request to `ProtocolController`, so we must + // ensure we handle this incoming request correctly. 
+ fn on_incoming_connection(&mut self, peer_id: PeerId, incoming_index: IncomingIndex) { + trace!( + target: LOG_TARGET, + "Incoming connection from peer {peer_id} ({incoming_index:?}) on {:?}.", + self.set_id, + ); + + if self.reserved_only && !self.reserved_nodes.contains_key(&peer_id) { + self.reject_connection(peer_id, incoming_index); + return + } + + // Check if the node is reserved first. + if let Some(state) = self.reserved_nodes.get_mut(&peer_id) { + match state { + PeerState::Connected(ref mut direction) => { + // We are accepting an incoming connection, so ensure the direction is inbound. + // (See the implementation note above.) + *direction = Direction::Inbound; + self.accept_connection(peer_id, incoming_index); + }, + PeerState::NotConnected => + if self.peer_store.is_banned(&peer_id) { + self.reject_connection(peer_id, incoming_index); + } else { + *state = PeerState::Connected(Direction::Inbound); + self.accept_connection(peer_id, incoming_index); + }, + } + return + } + + // If we're already connected, pretend we are not connected and decide on the node again. + // (See the note above.) + if let Some(direction) = self.nodes.remove(&peer_id) { + trace!( + target: LOG_TARGET, + "Handling incoming connection from peer {} we think we already connected as {:?} on {:?}.", + peer_id, + direction, + self.set_id + ); + match direction { + Direction::Inbound => self.num_in -= 1, + Direction::Outbound => self.num_out -= 1, + } + } + + if self.num_in >= self.max_in { + self.reject_connection(peer_id, incoming_index); + return + } + + if self.is_banned(&peer_id) { + self.reject_connection(peer_id, incoming_index); + return + } + + self.num_in += 1; + self.nodes.insert(peer_id, Direction::Inbound); + self.accept_connection(peer_id, incoming_index); + } + + /// Indicate that a connection with the peer was dropped. 
+ fn on_peer_dropped(&mut self, peer_id: PeerId) { + self.on_peer_dropped_inner(peer_id).unwrap_or_else(|peer_id| { + // We do not assert here, because due to asynchronous nature of communication + // between `ProtocolController` and `Notifications` we can receive `Action::Dropped` + // for a peer we already disconnected ourself. + trace!( + target: LOG_TARGET, + "Received `Action::Dropped` for not connected peer {peer_id} on {:?}.", + self.set_id, + ) + }); + } + + /// Indicate that a connection with the peer was dropped. + /// Returns `Err(PeerId)` if the peer wasn't connected or is not known to us. + fn on_peer_dropped_inner(&mut self, peer_id: PeerId) -> Result<(), PeerId> { + if self.drop_reserved_peer(&peer_id)? || self.drop_regular_peer(&peer_id) { + // The peer found and disconnected. + self.report_disconnect(peer_id); + Ok(()) + } else { + // The peer was not found in neither regular or reserved lists. + Err(peer_id) + } + } + + /// Try dropping the peer as a reserved peer. Return `Ok(true)` if the peer was found and + /// disconnected, `Ok(false)` if it wasn't found, `Err(PeerId)`, if the peer found, but not in + /// connected state. + fn drop_reserved_peer(&mut self, peer_id: &PeerId) -> Result { + let Some(state) = self.reserved_nodes.get_mut(peer_id) else { return Ok(false) }; + + if let PeerState::Connected(direction) = state { + trace!( + target: LOG_TARGET, + "Reserved peer {peer_id} ({direction:?}) dropped from {:?}.", + self.set_id, + ); + *state = PeerState::NotConnected; + Ok(true) + } else { + Err(*peer_id) + } + } + + /// Try dropping the peer as a regular peer. Return `true` if the peer was found and + /// disconnected, `false` if it wasn't found. 
+ fn drop_regular_peer(&mut self, peer_id: &PeerId) -> bool { + let Some(direction) = self.nodes.remove(peer_id) else { return false }; + + trace!( + target: LOG_TARGET, + "Peer {peer_id} ({direction:?}) dropped from {:?}.", + self.set_id, + ); + + match direction { + Direction::Inbound => self.num_in -= 1, + Direction::Outbound => self.num_out -= 1, + } + + true + } + + /// Initiate outgoing connections trying to connect all reserved nodes and fill in all outgoing + /// slots. + fn alloc_slots(&mut self) { + // Try connecting to reserved nodes first, ignoring nodes with outstanding events/actions. + self.reserved_nodes + .iter_mut() + .filter_map(|(peer_id, state)| { + (!state.is_connected() && !self.peer_store.is_banned(peer_id)).then(|| { + *state = PeerState::Connected(Direction::Outbound); + peer_id + }) + }) + .cloned() + .collect::>() + .into_iter() + .for_each(|peer_id| { + self.start_connection(peer_id); + }); + + // Nothing more to do if we're in reserved-only mode or don't have slots available. + if self.reserved_only || self.num_out >= self.max_out { + return + } + + // Fill available slots. + let available_slots = (self.max_out - self.num_out).saturated_into(); + + // Ignore reserved nodes (connected above), already connected nodes, and nodes with + // outstanding events/actions. 
+ let ignored = self + .reserved_nodes + .keys() + .collect::>() + .union(&self.nodes.keys().collect::>()) + .cloned() + .collect(); + + let candidates = self + .peer_store + .outgoing_candidates(available_slots, ignored) + .into_iter() + .filter_map(|peer_id| { + (!self.reserved_nodes.contains_key(&peer_id) && !self.nodes.contains_key(&peer_id)) + .then_some(peer_id) + .or_else(|| { + error!( + target: LOG_TARGET, + "`PeerStore` returned a node we asked to ignore: {peer_id}.", + ); + debug_assert!(false, "`PeerStore` returned a node we asked to ignore."); + None + }) + }) + .collect::>(); + + if candidates.len() > available_slots { + error!( + target: LOG_TARGET, + "`PeerStore` returned more nodes than there are slots available.", + ); + debug_assert!(false, "`PeerStore` returned more nodes than there are slots available."); + } + + candidates.into_iter().take(available_slots).for_each(|peer_id| { + self.num_out += 1; + self.nodes.insert(peer_id, Direction::Outbound); + self.start_connection(peer_id); + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{peer_store::PeerStoreProvider, ReputationChange}; + use libp2p::PeerId; + use sc_utils::mpsc::{tracing_unbounded, TryRecvError}; + use std::collections::HashSet; + + mockall::mock! { + #[derive(Debug)] + pub PeerStoreHandle {} + + impl PeerStoreProvider for PeerStoreHandle { + fn is_banned(&self, peer_id: &PeerId) -> bool; + fn register_protocol(&self, protocol_handle: ProtocolHandle); + fn report_disconnect(&mut self, peer_id: PeerId); + fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange); + fn peer_reputation(&self, peer_id: &PeerId) -> i32; + fn outgoing_candidates<'a>(&self, count: usize, ignored: HashSet<&'a PeerId>) -> Vec; + } + } + + #[test] + fn reserved_nodes_are_connected_dropped_and_accepted() { + let reserved1 = PeerId::random(); + let reserved2 = PeerId::random(); + + // Add first reserved node via config. 
+ let config = ProtoSetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: std::iter::once(reserved1).collect(), + reserved_only: true, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().times(4).return_const(false); + peer_store.expect_report_disconnect().times(2).return_const(()); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Add second reserved node at runtime (this currently calls `alloc_slots` internally). + controller.on_add_reserved_peer(reserved2); + + // Initiate connections (currently, `alloc_slots` is also called internally in + // `on_add_reserved_peer` above). + controller.alloc_slots(); + + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved1 })); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved2 })); + + // Reserved peers do not occupy slots. + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + + // Drop connections to be able to accept reserved nodes. + controller.on_peer_dropped(reserved1); + controller.on_peer_dropped(reserved2); + + // Incoming connection from `reserved1`. + let incoming1 = IncomingIndex(1); + controller.on_incoming_connection(reserved1, incoming1); + assert_eq!(rx.try_recv().unwrap(), Message::Accept(incoming1)); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + + // Incoming connection from `reserved2`. 
+ let incoming2 = IncomingIndex(2); + controller.on_incoming_connection(reserved2, incoming2); + assert_eq!(rx.try_recv().unwrap(), Message::Accept(incoming2)); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + + // Reserved peers do not occupy slots. + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + } + + #[test] + fn banned_reserved_nodes_are_not_connected_and_not_accepted() { + let reserved1 = PeerId::random(); + let reserved2 = PeerId::random(); + + // Add first reserved node via config. + let config = ProtoSetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: std::iter::once(reserved1).collect(), + reserved_only: true, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().times(6).return_const(true); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Add second reserved node at runtime (this currently calls `alloc_slots` internally). + controller.on_add_reserved_peer(reserved2); + + // Initiate connections. + controller.alloc_slots(); + + // No slots occupied. + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + + // No commands are generated. + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + + // Incoming connection from `reserved1`. + let incoming1 = IncomingIndex(1); + controller.on_incoming_connection(reserved1, incoming1); + assert_eq!(rx.try_recv().unwrap(), Message::Reject(incoming1)); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + + // Incoming connection from `reserved2`. 
+ let incoming2 = IncomingIndex(2); + controller.on_incoming_connection(reserved2, incoming2); + assert_eq!(rx.try_recv().unwrap(), Message::Reject(incoming2)); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + + // No slots occupied. + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + } + + #[test] + fn we_try_to_reconnect_to_dropped_reserved_nodes() { + let reserved1 = PeerId::random(); + let reserved2 = PeerId::random(); + + // Add first reserved node via config. + let config = ProtoSetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: std::iter::once(reserved1).collect(), + reserved_only: true, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().times(4).return_const(false); + peer_store.expect_report_disconnect().times(2).return_const(()); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Add second reserved node at runtime (this calls `alloc_slots` internally). + controller.on_add_reserved_peer(reserved2); + + // Initiate connections (actually redundant, see previous comment). + controller.alloc_slots(); + + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved1 })); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved2 })); + + // Drop both reserved nodes. + controller.on_peer_dropped(reserved1); + controller.on_peer_dropped(reserved2); + + // Initiate connections. 
+ controller.alloc_slots(); + + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved1 })); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved2 })); + + // No slots occupied. + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + } + + #[test] + fn nodes_supplied_by_peer_store_are_connected() { + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let candidates = vec![peer1, peer2]; + + let config = ProtoSetConfig { + in_peers: 0, + // Less slots than candidates. + out_peers: 2, + reserved_nodes: HashSet::new(), + reserved_only: false, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_outgoing_candidates().once().return_const(candidates); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Initiate connections. + controller.alloc_slots(); + + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + + // Only first two peers are connected (we only have 2 slots). + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: peer1 })); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: peer2 })); + + // Outgoing slots occupied. + assert_eq!(controller.num_out, 2); + assert_eq!(controller.num_in, 0); + + // No more nodes are connected. + controller.alloc_slots(); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + + // No more slots occupied. 
+ assert_eq!(controller.num_out, 2); + assert_eq!(controller.num_in, 0); + } + + #[test] + fn both_reserved_nodes_and_nodes_supplied_by_peer_store_are_connected() { + let reserved1 = PeerId::random(); + let reserved2 = PeerId::random(); + let regular1 = PeerId::random(); + let regular2 = PeerId::random(); + let outgoing_candidates = vec![regular1, regular2]; + let reserved_nodes = [reserved1, reserved2].iter().cloned().collect(); + + let config = + ProtoSetConfig { in_peers: 10, out_peers: 10, reserved_nodes, reserved_only: false }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().times(2).return_const(false); + peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Initiate connections. + controller.alloc_slots(); + + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + assert_eq!(messages.len(), 4); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved1 })); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved2 })); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: regular1 })); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: regular2 })); + assert_eq!(controller.num_out, 2); + assert_eq!(controller.num_in, 0); + } + + #[test] + fn if_slots_are_freed_we_try_to_allocate_them_again() { + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let peer3 = PeerId::random(); + let candidates1 = vec![peer1, peer2]; + let candidates2 = vec![peer3]; + + let config = ProtoSetConfig { + in_peers: 0, + // Less slots than candidates. 
+ out_peers: 2, + reserved_nodes: HashSet::new(), + reserved_only: false, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_outgoing_candidates().once().return_const(candidates1); + peer_store.expect_outgoing_candidates().once().return_const(candidates2); + peer_store.expect_report_disconnect().times(2).return_const(()); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Initiate connections. + controller.alloc_slots(); + + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + + // Only first two peers are connected (we only have 2 slots). + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: peer1 })); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: peer2 })); + + // Outgoing slots occupied. + assert_eq!(controller.num_out, 2); + assert_eq!(controller.num_in, 0); + + // No more nodes are connected. + controller.alloc_slots(); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + + // No more slots occupied. + assert_eq!(controller.num_out, 2); + assert_eq!(controller.num_in, 0); + + // Drop peers. + controller.on_peer_dropped(peer1); + controller.on_peer_dropped(peer2); + + // Slots are freed. + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + + // Initiate connections. + controller.alloc_slots(); + + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + + // Peers are connected. + assert_eq!(messages.len(), 1); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: peer3 })); + + // Outgoing slots occupied. 
+ assert_eq!(controller.num_out, 1); + assert_eq!(controller.num_in, 0); + } + + #[test] + fn in_reserved_only_mode_no_peers_are_requested_from_peer_store_and_connected() { + let config = ProtoSetConfig { + in_peers: 0, + // Make sure we have slots available. + out_peers: 2, + reserved_nodes: HashSet::new(), + reserved_only: true, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Initiate connections. + controller.alloc_slots(); + + // No nodes are connected. + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + } + + #[test] + fn in_reserved_only_mode_no_regular_peers_are_accepted() { + let config = ProtoSetConfig { + // Make sure we have slots available. + in_peers: 2, + out_peers: 0, + reserved_nodes: HashSet::new(), + reserved_only: true, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + let peer = PeerId::random(); + let incoming_index = IncomingIndex(1); + controller.on_incoming_connection(peer, incoming_index); + + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + + // Peer is rejected. 
+ assert_eq!(messages.len(), 1); + assert!(messages.contains(&Message::Reject(incoming_index))); + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + } + + #[test] + fn disabling_reserved_only_mode_allows_to_connect_to_peers() { + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let candidates = vec![peer1, peer2]; + + let config = ProtoSetConfig { + in_peers: 0, + // Make sure we have slots available. + out_peers: 10, + reserved_nodes: HashSet::new(), + reserved_only: true, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_outgoing_candidates().once().return_const(candidates); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Initiate connections. + controller.alloc_slots(); + + // No nodes are connected. + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + + // Disable reserved-only mode (this also connects to peers). 
+ controller.on_set_reserved_only(false); + + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: peer1 })); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: peer2 })); + assert_eq!(controller.num_out, 2); + assert_eq!(controller.num_in, 0); + } + + #[test] + fn enabling_reserved_only_mode_disconnects_regular_peers() { + let reserved1 = PeerId::random(); + let reserved2 = PeerId::random(); + let regular1 = PeerId::random(); + let regular2 = PeerId::random(); + let outgoing_candidates = vec![regular1]; + + let config = ProtoSetConfig { + in_peers: 10, + out_peers: 10, + reserved_nodes: [reserved1, reserved2].iter().cloned().collect(), + reserved_only: false, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().times(3).return_const(false); + peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + + // Connect `regular1` as outbound. 
+ controller.alloc_slots(); + + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + assert_eq!(messages.len(), 3); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved1 })); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved2 })); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: regular1 })); + assert_eq!(controller.num_out, 1); + assert_eq!(controller.num_in, 0); + + // Connect `regular2` as inbound. + let incoming_index = IncomingIndex(1); + controller.on_incoming_connection(regular2, incoming_index); + assert_eq!(rx.try_recv().unwrap(), Message::Accept(incoming_index)); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + assert_eq!(controller.num_out, 1); + assert_eq!(controller.num_in, 1); + + // Switch to reserved-only mode. + controller.on_set_reserved_only(true); + + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Drop { set_id: SetId::from(0), peer_id: regular1 })); + assert!(messages.contains(&Message::Drop { set_id: SetId::from(0), peer_id: regular2 })); + assert_eq!(controller.nodes.len(), 0); + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + } + + #[test] + fn removed_disconnected_reserved_node_is_forgotten() { + let reserved1 = PeerId::random(); + let reserved2 = PeerId::random(); + + let config = ProtoSetConfig { + in_peers: 10, + out_peers: 10, + reserved_nodes: [reserved1, reserved2].iter().cloned().collect(), + reserved_only: false, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, 
Box::new(peer_store)); + assert_eq!(controller.reserved_nodes.len(), 2); + assert_eq!(controller.nodes.len(), 0); + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + + controller.on_remove_reserved_peer(reserved1); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + assert_eq!(controller.reserved_nodes.len(), 1); + assert!(!controller.reserved_nodes.contains_key(&reserved1)); + assert_eq!(controller.nodes.len(), 0); + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + } + + #[test] + fn removed_connected_reserved_node_is_disconnected_in_reserved_only_mode() { + let reserved1 = PeerId::random(); + let reserved2 = PeerId::random(); + + let config = ProtoSetConfig { + in_peers: 10, + out_peers: 10, + reserved_nodes: [reserved1, reserved2].iter().cloned().collect(), + reserved_only: true, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().times(2).return_const(false); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Initiate connections. 
+ controller.alloc_slots(); + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved1 })); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved2 })); + assert_eq!(controller.reserved_nodes.len(), 2); + assert!(controller.reserved_nodes.contains_key(&reserved1)); + assert!(controller.reserved_nodes.contains_key(&reserved2)); + assert!(controller.nodes.is_empty()); + + // Remove reserved node + controller.on_remove_reserved_peer(reserved1); + assert_eq!( + rx.try_recv().unwrap(), + Message::Drop { set_id: SetId::from(0), peer_id: reserved1 } + ); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + assert_eq!(controller.reserved_nodes.len(), 1); + assert!(controller.reserved_nodes.contains_key(&reserved2)); + assert!(controller.nodes.is_empty()); + } + + #[test] + fn removed_connected_reserved_nodes_become_regular_in_non_reserved_mode() { + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + + let config = ProtoSetConfig { + in_peers: 10, + out_peers: 10, + reserved_nodes: [peer1, peer2].iter().cloned().collect(), + reserved_only: false, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().times(2).return_const(false); + peer_store.expect_outgoing_candidates().once().return_const(Vec::new()); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Connect `peer1` as inbound, `peer2` as outbound. 
+ controller.on_incoming_connection(peer1, IncomingIndex(1)); + controller.alloc_slots(); + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Accept(IncomingIndex(1)))); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: peer2 })); + assert_eq!(controller.num_out, 0); + assert_eq!(controller.num_in, 0); + + // Remove reserved nodes (and make them regular) + controller.on_remove_reserved_peer(peer1); + controller.on_remove_reserved_peer(peer2); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + assert_eq!(controller.nodes.len(), 2); + assert!(matches!(controller.nodes.get(&peer1), Some(Direction::Inbound))); + assert!(matches!(controller.nodes.get(&peer2), Some(Direction::Outbound))); + assert_eq!(controller.num_out, 1); + assert_eq!(controller.num_in, 1); + } + + #[test] + fn regular_nodes_stop_occupying_slots_when_become_reserved() { + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let outgoing_candidates = vec![peer1]; + + let config = ProtoSetConfig { + in_peers: 10, + out_peers: 10, + reserved_nodes: HashSet::new(), + reserved_only: false, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().once().return_const(false); + peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Connect `peer1` as outbound & `peer2` as inbound. 
+ controller.alloc_slots(); + controller.on_incoming_connection(peer2, IncomingIndex(1)); + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: peer1 })); + assert!(messages.contains(&Message::Accept(IncomingIndex(1)))); + assert_eq!(controller.num_in, 1); + assert_eq!(controller.num_out, 1); + + controller.on_add_reserved_peer(peer1); + controller.on_add_reserved_peer(peer2); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + assert_eq!(controller.num_in, 0); + assert_eq!(controller.num_out, 0); + } + + #[test] + fn disconnecting_regular_peers_work() { + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let outgoing_candidates = vec![peer1]; + + let config = ProtoSetConfig { + in_peers: 10, + out_peers: 10, + reserved_nodes: HashSet::new(), + reserved_only: false, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().once().return_const(false); + peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Connect `peer1` as outbound & `peer2` as inbound. 
+ controller.alloc_slots(); + controller.on_incoming_connection(peer2, IncomingIndex(1)); + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: peer1 })); + assert!(messages.contains(&Message::Accept(IncomingIndex(1)))); + assert_eq!(controller.nodes.len(), 2); + assert!(matches!(controller.nodes.get(&peer1), Some(Direction::Outbound))); + assert!(matches!(controller.nodes.get(&peer2), Some(Direction::Inbound))); + assert_eq!(controller.num_in, 1); + assert_eq!(controller.num_out, 1); + + controller.on_disconnect_peer(peer1); + assert_eq!( + rx.try_recv().unwrap(), + Message::Drop { set_id: SetId::from(0), peer_id: peer1 } + ); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + assert_eq!(controller.nodes.len(), 1); + assert!(!controller.nodes.contains_key(&peer1)); + assert_eq!(controller.num_in, 1); + assert_eq!(controller.num_out, 0); + + controller.on_disconnect_peer(peer2); + assert_eq!( + rx.try_recv().unwrap(), + Message::Drop { set_id: SetId::from(0), peer_id: peer2 } + ); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + assert_eq!(controller.nodes.len(), 0); + assert_eq!(controller.num_in, 0); + assert_eq!(controller.num_out, 0); + } + + #[test] + fn disconnecting_reserved_peers_is_a_noop() { + let reserved1 = PeerId::random(); + let reserved2 = PeerId::random(); + + let config = ProtoSetConfig { + in_peers: 10, + out_peers: 10, + reserved_nodes: [reserved1, reserved2].iter().cloned().collect(), + reserved_only: false, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().times(2).return_const(false); + peer_store.expect_outgoing_candidates().once().return_const(Vec::new()); + + let 
(_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Connect `reserved1` as inbound & `reserved2` as outbound. + controller.on_incoming_connection(reserved1, IncomingIndex(1)); + controller.alloc_slots(); + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Accept(IncomingIndex(1)))); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved2 })); + assert!(matches!( + controller.reserved_nodes.get(&reserved1), + Some(PeerState::Connected(Direction::Inbound)) + )); + assert!(matches!( + controller.reserved_nodes.get(&reserved2), + Some(PeerState::Connected(Direction::Outbound)) + )); + + controller.on_disconnect_peer(reserved1); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + assert!(matches!( + controller.reserved_nodes.get(&reserved1), + Some(PeerState::Connected(Direction::Inbound)) + )); + + controller.on_disconnect_peer(reserved2); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + assert!(matches!( + controller.reserved_nodes.get(&reserved2), + Some(PeerState::Connected(Direction::Outbound)) + )); + } + + #[test] + fn dropping_regular_peers_work() { + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let outgoing_candidates = vec![peer1]; + + let config = ProtoSetConfig { + in_peers: 10, + out_peers: 10, + reserved_nodes: HashSet::new(), + reserved_only: false, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().once().return_const(false); + peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates); + peer_store.expect_report_disconnect().times(2).return_const(()); + + let (_handle, mut controller) = 
+ ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Connect `peer1` as outbound & `peer2` as inbound. + controller.alloc_slots(); + controller.on_incoming_connection(peer2, IncomingIndex(1)); + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: peer1 })); + assert!(messages.contains(&Message::Accept(IncomingIndex(1)))); + assert_eq!(controller.nodes.len(), 2); + assert!(matches!(controller.nodes.get(&peer1), Some(Direction::Outbound))); + assert!(matches!(controller.nodes.get(&peer2), Some(Direction::Inbound))); + assert_eq!(controller.num_in, 1); + assert_eq!(controller.num_out, 1); + + controller.on_peer_dropped(peer1); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + assert_eq!(controller.nodes.len(), 1); + assert!(!controller.nodes.contains_key(&peer1)); + assert_eq!(controller.num_in, 1); + assert_eq!(controller.num_out, 0); + + controller.on_peer_dropped(peer2); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + assert_eq!(controller.nodes.len(), 0); + assert_eq!(controller.num_in, 0); + assert_eq!(controller.num_out, 0); + } + + #[test] + fn incoming_request_for_connected_reserved_node_switches_it_to_inbound() { + let reserved1 = PeerId::random(); + let reserved2 = PeerId::random(); + + let config = ProtoSetConfig { + in_peers: 10, + out_peers: 10, + reserved_nodes: [reserved1, reserved2].iter().cloned().collect(), + reserved_only: false, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().times(2).return_const(false); + peer_store.expect_outgoing_candidates().once().return_const(Vec::new()); + + let (_handle, mut controller) = + 
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Connect `reserved1` as inbound & `reserved2` as outbound. + controller.on_incoming_connection(reserved1, IncomingIndex(1)); + controller.alloc_slots(); + let mut messages = Vec::new(); + while let Some(message) = rx.try_recv().ok() { + messages.push(message); + } + assert_eq!(messages.len(), 2); + assert!(messages.contains(&Message::Accept(IncomingIndex(1)))); + assert!(messages.contains(&Message::Connect { set_id: SetId::from(0), peer_id: reserved2 })); + assert!(matches!( + controller.reserved_nodes.get(&reserved1), + Some(PeerState::Connected(Direction::Inbound)) + )); + assert!(matches!( + controller.reserved_nodes.get(&reserved2), + Some(PeerState::Connected(Direction::Outbound)) + )); + + // Incoming request for `reserved1`. + controller.on_incoming_connection(reserved1, IncomingIndex(2)); + assert_eq!(rx.try_recv().ok().unwrap(), Message::Accept(IncomingIndex(2))); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + assert!(matches!( + controller.reserved_nodes.get(&reserved1), + Some(PeerState::Connected(Direction::Inbound)) + )); + + // Incoming request for `reserved2`. 
+ controller.on_incoming_connection(reserved2, IncomingIndex(3));
+ assert_eq!(rx.try_recv().ok().unwrap(), Message::Accept(IncomingIndex(3)));
+ assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+ assert!(matches!(
+ controller.reserved_nodes.get(&reserved2),
+ Some(PeerState::Connected(Direction::Inbound))
+ ));
+ }
+
+ #[test]
+ fn incoming_request_for_connected_regular_node_switches_it_to_inbound() {
+ let regular1 = PeerId::random();
+ let regular2 = PeerId::random();
+ let outgoing_candidates = vec![regular1];
+
+ let config = ProtoSetConfig {
+ in_peers: 10,
+ out_peers: 10,
+ reserved_nodes: HashSet::new(),
+ reserved_only: false,
+ };
+ let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100);
+
+ let mut peer_store = MockPeerStoreHandle::new();
+ peer_store.expect_register_protocol().once().return_const(());
+ peer_store.expect_is_banned().times(3).return_const(false);
+ peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
+
+ let (_handle, mut controller) =
+ ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+ assert_eq!(controller.num_out, 0);
+ assert_eq!(controller.num_in, 0);
+
+ // Connect `regular1` as outbound.
+ controller.alloc_slots();
+ assert_eq!(
+ rx.try_recv().ok().unwrap(),
+ Message::Connect { set_id: SetId::from(0), peer_id: regular1 }
+ );
+ assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+ assert!(matches!(controller.nodes.get(&regular1).unwrap(), Direction::Outbound,));
+
+ // Connect `regular2` as inbound.
+ controller.on_incoming_connection(regular2, IncomingIndex(0));
+ assert_eq!(rx.try_recv().ok().unwrap(), Message::Accept(IncomingIndex(0)));
+ assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+ assert!(matches!(controller.nodes.get(&regular2).unwrap(), Direction::Inbound,));
+
+ // Incoming request for `regular1`.
+ controller.on_incoming_connection(regular1, IncomingIndex(1));
+ assert_eq!(rx.try_recv().ok().unwrap(), Message::Accept(IncomingIndex(1)));
+ assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+ assert!(matches!(controller.nodes.get(&regular1).unwrap(), Direction::Inbound,));
+
+ // Incoming request for `regular2`.
+ controller.on_incoming_connection(regular2, IncomingIndex(2));
+ assert_eq!(rx.try_recv().ok().unwrap(), Message::Accept(IncomingIndex(2)));
+ assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+ assert!(matches!(controller.nodes.get(&regular2).unwrap(), Direction::Inbound,));
+ }
+
+ #[test]
+ fn incoming_request_for_connected_node_is_rejected_if_its_banned() {
+ let regular1 = PeerId::random();
+ let regular2 = PeerId::random();
+ let outgoing_candidates = vec![regular1];
+
+ let config = ProtoSetConfig {
+ in_peers: 10,
+ out_peers: 10,
+ reserved_nodes: HashSet::new(),
+ reserved_only: false,
+ };
+ let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100);
+
+ let mut peer_store = MockPeerStoreHandle::new();
+ peer_store.expect_register_protocol().once().return_const(());
+ peer_store.expect_is_banned().once().return_const(false);
+ peer_store.expect_is_banned().times(2).return_const(true);
+ peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
+
+ let (_handle, mut controller) =
+ ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+ assert_eq!(controller.num_out, 0);
+ assert_eq!(controller.num_in, 0);
+
+ // Connect `regular1` as outbound.
+ controller.alloc_slots();
+ assert_eq!(
+ rx.try_recv().ok().unwrap(),
+ Message::Connect { set_id: SetId::from(0), peer_id: regular1 }
+ );
+ assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+ assert!(matches!(controller.nodes.get(&regular1).unwrap(), Direction::Outbound,));
+
+ // Connect `regular2` as inbound.
+ controller.on_incoming_connection(regular2, IncomingIndex(0));
+ assert_eq!(rx.try_recv().ok().unwrap(), Message::Accept(IncomingIndex(0)));
+ assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+ assert!(matches!(controller.nodes.get(&regular2).unwrap(), Direction::Inbound,));
+
+ // Incoming request for `regular1`.
+ controller.on_incoming_connection(regular1, IncomingIndex(1));
+ assert_eq!(rx.try_recv().ok().unwrap(), Message::Reject(IncomingIndex(1)));
+ assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+ assert!(!controller.nodes.contains_key(&regular1));
+
+ // Incoming request for `regular2`.
+ controller.on_incoming_connection(regular2, IncomingIndex(2));
+ assert_eq!(rx.try_recv().ok().unwrap(), Message::Reject(IncomingIndex(2)));
+ assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+ assert!(!controller.nodes.contains_key(&regular2));
+ }
+
+ #[test]
+ fn incoming_request_for_connected_node_is_rejected_if_no_slots_available() {
+ let regular1 = PeerId::random();
+ let regular2 = PeerId::random();
+ let outgoing_candidates = vec![regular1];
+
+ let config = ProtoSetConfig {
+ in_peers: 1,
+ out_peers: 1,
+ reserved_nodes: HashSet::new(),
+ reserved_only: false,
+ };
+ let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100);
+
+ let mut peer_store = MockPeerStoreHandle::new();
+ peer_store.expect_register_protocol().once().return_const(());
+ peer_store.expect_is_banned().once().return_const(false);
+ peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
+
+ let (_handle, mut controller) =
+ ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+ assert_eq!(controller.num_out, 0);
+ assert_eq!(controller.num_in, 0);
+
+ // Connect `regular1` as outbound.
+ controller.alloc_slots();
+ assert_eq!(
+ rx.try_recv().ok().unwrap(),
+ Message::Connect { set_id: SetId::from(0), peer_id: regular1 }
+ );
+ assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+ assert!(matches!(controller.nodes.get(&regular1).unwrap(), Direction::Outbound,));
+
+ // Connect `regular2` as inbound.
+ controller.on_incoming_connection(regular2, IncomingIndex(0));
+ assert_eq!(rx.try_recv().ok().unwrap(), Message::Accept(IncomingIndex(0)));
+ assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+ assert!(matches!(controller.nodes.get(&regular2).unwrap(), Direction::Inbound,));
+
+ controller.max_in = 0;
+
+ // Incoming request for `regular1`.
+ controller.on_incoming_connection(regular1, IncomingIndex(1));
+ assert_eq!(rx.try_recv().ok().unwrap(), Message::Reject(IncomingIndex(1)));
+ assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+ assert!(!controller.nodes.contains_key(&regular1));
+
+ // Incoming request for `regular2`.
+ controller.on_incoming_connection(regular2, IncomingIndex(2));
+ assert_eq!(rx.try_recv().ok().unwrap(), Message::Reject(IncomingIndex(2)));
+ assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+ assert!(!controller.nodes.contains_key(&regular2));
+ }
+
+ #[test]
+ fn incoming_peers_that_exceed_slots_are_rejected() {
+ let peer1 = PeerId::random();
+ let peer2 = PeerId::random();
+
+ let config = ProtoSetConfig {
+ in_peers: 1,
+ out_peers: 10,
+ reserved_nodes: HashSet::new(),
+ reserved_only: false,
+ };
+ let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100);
+
+ let mut peer_store = MockPeerStoreHandle::new();
+ peer_store.expect_register_protocol().once().return_const(());
+ peer_store.expect_is_banned().once().return_const(false);
+
+ let (_handle, mut controller) =
+ ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+
+ // Connect `peer1` as inbound.
+ controller.on_incoming_connection(peer1, IncomingIndex(1)); + assert_eq!(rx.try_recv().unwrap(), Message::Accept(IncomingIndex(1))); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + + // Incoming requests for `peer2`. + controller.on_incoming_connection(peer2, IncomingIndex(2)); + assert_eq!(rx.try_recv().unwrap(), Message::Reject(IncomingIndex(2))); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + } + + #[test] + fn banned_regular_incoming_node_is_rejected() { + let peer1 = PeerId::random(); + + let config = ProtoSetConfig { + in_peers: 10, + out_peers: 10, + reserved_nodes: HashSet::new(), + reserved_only: false, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().once().return_const(true); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + + // Incoming request. + controller.on_incoming_connection(peer1, IncomingIndex(1)); + assert_eq!(rx.try_recv().unwrap(), Message::Reject(IncomingIndex(1))); + assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); + } + + #[test] + fn banned_reserved_incoming_node_is_rejected() { + let reserved1 = PeerId::random(); + + let config = ProtoSetConfig { + in_peers: 10, + out_peers: 10, + reserved_nodes: std::iter::once(reserved1).collect(), + reserved_only: false, + }; + let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100); + + let mut peer_store = MockPeerStoreHandle::new(); + peer_store.expect_register_protocol().once().return_const(()); + peer_store.expect_is_banned().once().return_const(true); + + let (_handle, mut controller) = + ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); + assert!(controller.reserved_nodes.contains_key(&reserved1)); + + // Incoming request. 
+		controller.on_incoming_connection(reserved1, IncomingIndex(1));
+		assert_eq!(rx.try_recv().unwrap(), Message::Reject(IncomingIndex(1)));
+		assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+	}
+
+	#[test]
+	fn we_dont_connect_to_banned_reserved_node() {
+		let reserved1 = PeerId::random();
+
+		let config = ProtoSetConfig {
+			in_peers: 10,
+			out_peers: 10,
+			reserved_nodes: std::iter::once(reserved1).collect(),
+			reserved_only: false,
+		};
+		let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100);
+
+		let mut peer_store = MockPeerStoreHandle::new();
+		peer_store.expect_register_protocol().once().return_const(());
+		peer_store.expect_is_banned().once().return_const(true);
+		peer_store.expect_outgoing_candidates().once().return_const(Vec::new());
+
+		let (_handle, mut controller) =
+			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+		assert!(matches!(controller.reserved_nodes.get(&reserved1), Some(PeerState::NotConnected)));
+
+		// Initiate connections.
+		controller.alloc_slots();
+		assert!(matches!(controller.reserved_nodes.get(&reserved1), Some(PeerState::NotConnected)));
+		assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);
+	}
+}
diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs
index e0f4074e0a22e..5af072aaddc62 100644
--- a/client/network/src/request_responses.rs
+++ b/client/network/src/request_responses.rs
@@ -34,12 +34,13 @@
 //! - If provided, a ["requests processing"](ProtocolConfig::inbound_queue) channel
 //! is used to handle incoming requests.
-use crate::{types::ProtocolName, ReputationChange}; - -use futures::{ - channel::{mpsc, oneshot}, - prelude::*, +use crate::{ + peer_store::{PeerStoreProvider, BANNED_THRESHOLD}, + types::ProtocolName, + ReputationChange, }; + +use futures::{channel::oneshot, prelude::*}; use libp2p::{ core::{Endpoint, Multiaddr}, request_response::{self, Behaviour, Codec, Message, ProtocolSupport, ResponseChannel}, @@ -52,8 +53,6 @@ use libp2p::{ PeerId, }; -use sc_peerset::{PeersetHandle, BANNED_THRESHOLD}; - use std::{ collections::{hash_map::Entry, HashMap}, io, iter, @@ -126,7 +125,7 @@ pub struct ProtocolConfig { /// other peers. If this is `Some` but the channel is closed, then the local node will /// advertise support for this protocol, but any incoming request will lead to an error being /// sent back. - pub inbound_queue: Option>, + pub inbound_queue: Option>, } /// A single request received by a peer on a request-response protocol. @@ -259,8 +258,10 @@ pub struct RequestResponsesBehaviour { /// /// Contains the underlying libp2p request-response [`Behaviour`], plus an optional /// "response builder" used to build responses for incoming requests. - protocols: - HashMap, Option>)>, + protocols: HashMap< + ProtocolName, + (Behaviour, Option>), + >, /// Pending requests, passed down to a request-response [`Behaviour`], awaiting a reply. pending_requests: @@ -280,25 +281,7 @@ pub struct RequestResponsesBehaviour { send_feedback: HashMap>, /// Primarily used to get a reputation of a node. - peerset: PeersetHandle, - - /// Pending message request, holds `MessageRequest` as a Future state to poll it - /// until we get a response from `Peerset` - message_request: Option, -} - -// This is a state of processing incoming request Message. -// The main reason of this struct is to hold `get_peer_reputation` as a Future state. 
-struct MessageRequest { - peer: PeerId, - request_id: RequestId, - request: Vec, - channel: ResponseChannel, ()>>, - protocol: ProtocolName, - resp_builder: Option>, - // Once we get incoming request we save all params, create an async call to Peerset - // to get the reputation of the peer. - get_peer_reputation: Pin> + Send>>, + peer_store: Box, } /// Generated by the response builder and waiting to be processed. @@ -315,7 +298,7 @@ impl RequestResponsesBehaviour { /// the same protocol is passed twice. pub fn new( list: impl Iterator, - peerset: PeersetHandle, + peer_store: Box, ) -> Result { let mut protocols = HashMap::new(); for protocol in list { @@ -352,8 +335,7 @@ impl RequestResponsesBehaviour { pending_responses: Default::default(), pending_responses_arrival_time: Default::default(), send_feedback: Default::default(), - peerset, - message_request: None, + peer_store, }) } @@ -371,6 +353,8 @@ impl RequestResponsesBehaviour { pending_response: oneshot::Sender, RequestFailure>>, connect: IfDisconnected, ) { + log::trace!(target: "sub-libp2p", "send request to {target} ({protocol_name:?}), {} bytes", request.len()); + if let Some((protocol, _)) = self.protocols.get_mut(protocol_name) { if protocol.is_connected(target) || connect.should_connect() { let request_id = protocol.send_request(target, request); @@ -568,92 +552,6 @@ impl NetworkBehaviour for RequestResponsesBehaviour { params: &mut impl PollParameters, ) -> Poll>> { 'poll_all: loop { - if let Some(message_request) = self.message_request.take() { - // Now we can can poll `MessageRequest` until we get the reputation - - let MessageRequest { - peer, - request_id, - request, - channel, - protocol, - resp_builder, - mut get_peer_reputation, - } = message_request; - - let reputation = Future::poll(Pin::new(&mut get_peer_reputation), cx); - match reputation { - Poll::Pending => { - // Save the state to poll it again next time. 
- - self.message_request = Some(MessageRequest { - peer, - request_id, - request, - channel, - protocol, - resp_builder, - get_peer_reputation, - }); - return Poll::Pending - }, - Poll::Ready(reputation) => { - // Once we get the reputation we can continue processing the request. - - let reputation = reputation.expect( - "The channel can only be closed if the peerset no longer exists; qed", - ); - - if reputation < BANNED_THRESHOLD { - log::debug!( - target: "sub-libp2p", - "Cannot handle requests from a node with a low reputation {}: {}", - peer, - reputation, - ); - continue 'poll_all - } - - let (tx, rx) = oneshot::channel(); - - // Submit the request to the "response builder" passed by the user at - // initialization. - if let Some(mut resp_builder) = resp_builder { - // If the response builder is too busy, silently drop `tx`. This - // will be reported by the corresponding request-response [`Behaviour`] - // through an `InboundFailure::Omission` event. - let _ = resp_builder.try_send(IncomingRequest { - peer, - payload: request, - pending_response: tx, - }); - } else { - debug_assert!(false, "Received message on outbound-only protocol."); - } - - self.pending_responses.push(Box::pin(async move { - // The `tx` created above can be dropped if we are not capable of - // processing this request, which is reflected as a - // `InboundFailure::Omission` event. - if let Ok(response) = rx.await { - Some(RequestProcessingOutcome { - peer, - request_id, - protocol, - inner_channel: channel, - response, - }) - } else { - None - } - })); - - // This `continue` makes sure that `pending_responses` gets polled - // after we have added the new element. - continue 'poll_all - }, - } - } // Poll to see if any response is ready to be sent back. 
while let Poll::Ready(Some(outcome)) = self.pending_responses.poll_next_unpin(cx) { let RequestProcessingOutcome { @@ -671,6 +569,8 @@ impl NetworkBehaviour for RequestResponsesBehaviour { if let Ok(payload) = result { if let Some((protocol, _)) = self.protocols.get_mut(&*protocol_name) { + log::trace!(target: "sub-libp2p", "send response to {peer} ({protocol_name:?}), {} bytes", payload.len()); + if protocol.send_response(inner_channel, Ok(payload)).is_err() { // Note: Failure is handled further below when receiving // `InboundFailure` event from request-response [`Behaviour`]. @@ -698,7 +598,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // Poll request-responses protocols. for (protocol, (behaviour, resp_builder)) in &mut self.protocols { - while let Poll::Ready(ev) = behaviour.poll(cx, params) { + 'poll_protocol: while let Poll::Ready(ev) = behaviour.poll(cx, params) { let ev = match ev { // Main events we are interested in. ToSwarm::GenerateEvent(ev) => ev, @@ -734,23 +634,56 @@ impl NetworkBehaviour for RequestResponsesBehaviour { self.pending_responses_arrival_time .insert((protocol.clone(), request_id).into(), Instant::now()); - let get_peer_reputation = self.peerset.clone().peer_reputation(peer); - let get_peer_reputation = Box::pin(get_peer_reputation); + let reputation = self.peer_store.peer_reputation(&peer); - // Save the Future-like state with params to poll `get_peer_reputation` - // and to continue processing the request once we get the reputation of - // the peer. 
- self.message_request = Some(MessageRequest { - peer, - request_id, - request, - channel, - protocol: protocol.clone(), - resp_builder: resp_builder.clone(), - get_peer_reputation, - }); + if reputation < BANNED_THRESHOLD { + log::debug!( + target: "sub-libp2p", + "Cannot handle requests from a node with a low reputation {}: {}", + peer, + reputation, + ); + continue 'poll_protocol + } + + let (tx, rx) = oneshot::channel(); + + // Submit the request to the "response builder" passed by the user at + // initialization. + if let Some(resp_builder) = resp_builder { + // If the response builder is too busy, silently drop `tx`. This + // will be reported by the corresponding request-response + // [`Behaviour`] through an `InboundFailure::Omission` event. + // Note that we use `async_channel::bounded` and not `mpsc::channel` + // because the latter allocates an extra slot for every cloned + // sender. + let _ = resp_builder.try_send(IncomingRequest { + peer, + payload: request, + pending_response: tx, + }); + } else { + debug_assert!(false, "Received message on outbound-only protocol."); + } + + let protocol = protocol.clone(); - // This `continue` makes sure that `message_request` gets polled + self.pending_responses.push(Box::pin(async move { + // The `tx` created above can be dropped if we are not capable of + // processing this request, which is reflected as a + // `InboundFailure::Omission` event. + rx.await.map_or(None, |response| { + Some(RequestProcessingOutcome { + peer, + request_id, + protocol, + inner_channel: channel, + response, + }) + }) + })); + + // This `continue` makes sure that `pending_responses` gets polled // after we have added the new element. 
continue 'poll_all }, @@ -766,6 +699,12 @@ impl NetworkBehaviour for RequestResponsesBehaviour { .remove(&(protocol.clone(), request_id).into()) { Some((started, pending_response)) => { + log::trace!( + target: "sub-libp2p", + "received response from {peer} ({protocol:?}), {} bytes", + response.as_ref().map_or(0usize, |response| response.len()), + ); + let delivered = pending_response .send(response.map_err(|()| RequestFailure::Refused)) .map_err(|_| RequestFailure::Obsolete); @@ -1036,11 +975,8 @@ impl Codec for GenericCodec { mod tests { use super::*; - use futures::{ - channel::{mpsc, oneshot}, - executor::LocalPool, - task::Spawn, - }; + use crate::mock::MockPeerStore; + use futures::{channel::oneshot, executor::LocalPool, task::Spawn}; use libp2p::{ core::{ transport::{MemoryTransport, Transport}, @@ -1051,7 +987,6 @@ mod tests { swarm::{Executor, Swarm, SwarmBuilder, SwarmEvent}, Multiaddr, }; - use sc_peerset::{Peerset, PeersetConfig, SetConfig}; use std::{iter, time::Duration}; struct TokioExecutor(tokio::runtime::Runtime); @@ -1063,7 +998,7 @@ mod tests { fn build_swarm( list: impl Iterator, - ) -> (Swarm, Multiaddr, Peerset) { + ) -> (Swarm, Multiaddr) { let keypair = Keypair::generate_ed25519(); let transport = MemoryTransport::new() @@ -1072,19 +1007,7 @@ mod tests { .multiplex(libp2p::yamux::Config::default()) .boxed(); - let config = PeersetConfig { - sets: vec![SetConfig { - in_peers: u32::max_value(), - out_peers: u32::max_value(), - bootnodes: vec![], - reserved_nodes: Default::default(), - reserved_only: false, - }], - }; - - let (peerset, handle) = Peerset::from_config(config); - - let behaviour = RequestResponsesBehaviour::new(list, handle).unwrap(); + let behaviour = RequestResponsesBehaviour::new(list, Box::new(MockPeerStore {})).unwrap(); let runtime = tokio::runtime::Runtime::new().unwrap(); let mut swarm = SwarmBuilder::with_executor( @@ -1097,11 +1020,7 @@ mod tests { let listen_addr: Multiaddr = format!("/memory/{}", 
rand::random::()).parse().unwrap(); swarm.listen_on(listen_addr.clone()).unwrap(); - (swarm, listen_addr, peerset) - } - - async fn loop_peerset(peerset: Peerset) { - let _: Vec<_> = peerset.collect().await; + (swarm, listen_addr) } #[test] @@ -1112,7 +1031,7 @@ mod tests { // Build swarms whose behaviour is [`RequestResponsesBehaviour`]. let mut swarms = (0..2) .map(|_| { - let (tx, mut rx) = mpsc::channel::(64); + let (tx, mut rx) = async_channel::bounded::(64); pool.spawner() .spawn_obj( @@ -1153,9 +1072,7 @@ mod tests { Swarm::dial(&mut swarms[0].0, dial_addr).unwrap(); } - let (mut swarm, _, peerset) = swarms.remove(0); - // Process every peerset event in the background. - pool.spawner().spawn_obj(loop_peerset(peerset).boxed().into()).unwrap(); + let (mut swarm, _) = swarms.remove(0); // Running `swarm[0]` in the background. pool.spawner() .spawn_obj({ @@ -1175,9 +1092,7 @@ mod tests { .unwrap(); // Remove and run the remaining swarm. - let (mut swarm, _, peerset) = swarms.remove(0); - // Process every peerset event in the background. - pool.spawner().spawn_obj(loop_peerset(peerset).boxed().into()).unwrap(); + let (mut swarm, _) = swarms.remove(0); pool.run_until(async move { let mut response_receiver = None; @@ -1215,7 +1130,7 @@ mod tests { // Build swarms whose behaviour is [`RequestResponsesBehaviour`]. let mut swarms = (0..2) .map(|_| { - let (tx, mut rx) = mpsc::channel::(64); + let (tx, mut rx) = async_channel::bounded::(64); pool.spawner() .spawn_obj( @@ -1256,9 +1171,7 @@ mod tests { // Running `swarm[0]` in the background until a `InboundRequest` event happens, // which is a hint about the test having ended. - let (mut swarm, _, peerset) = swarms.remove(0); - // Process every peerset event in the background. - pool.spawner().spawn_obj(loop_peerset(peerset).boxed().into()).unwrap(); + let (mut swarm, _) = swarms.remove(0); pool.spawner() .spawn_obj({ async move { @@ -1278,9 +1191,7 @@ mod tests { .unwrap(); // Remove and run the remaining swarm. 
- let (mut swarm, _, peerset) = swarms.remove(0); - // Process every peerset event in the background. - pool.spawner().spawn_obj(loop_peerset(peerset).boxed().into()).unwrap(); + let (mut swarm, _) = swarms.remove(0); pool.run_until(async move { let mut response_receiver = None; @@ -1352,9 +1263,9 @@ mod tests { build_swarm(protocol_configs.into_iter()).0 }; - let (mut swarm_2, mut swarm_2_handler_1, mut swarm_2_handler_2, listen_add_2, peerset) = { - let (tx_1, rx_1) = mpsc::channel(64); - let (tx_2, rx_2) = mpsc::channel(64); + let (mut swarm_2, mut swarm_2_handler_1, mut swarm_2_handler_2, listen_add_2) = { + let (tx_1, rx_1) = async_channel::bounded(64); + let (tx_2, rx_2) = async_channel::bounded(64); let protocol_configs = vec![ ProtocolConfig { @@ -1375,12 +1286,10 @@ mod tests { }, ]; - let (swarm, listen_addr, peerset) = build_swarm(protocol_configs.into_iter()); + let (swarm, listen_addr) = build_swarm(protocol_configs.into_iter()); - (swarm, rx_1, rx_2, listen_addr, peerset) + (swarm, rx_1, rx_2, listen_addr) }; - // Process every peerset event in the background. - pool.spawner().spawn_obj(loop_peerset(peerset).boxed().into()).unwrap(); // Ask swarm 1 to dial swarm 2. There isn't any discovery mechanism in place in this test, // so they wouldn't connect to each other. 
diff --git a/client/network/src/service.rs b/client/network/src/service.rs index cd8e18a2e7d9f..aca0072a31de6 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -29,14 +29,16 @@ use crate::{ behaviour::{self, Behaviour, BehaviourOut}, - config::{FullNetworkConfiguration, MultiaddrWithPeerId, Params, TransportConfig}, + config::{parse_addr, FullNetworkConfiguration, MultiaddrWithPeerId, Params, TransportConfig}, discovery::DiscoveryConfig, error::Error, event::{DhtEvent, Event}, network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, + peer_store::{PeerStoreHandle, PeerStoreProvider}, protocol::{self, NotifsHandlerError, Protocol, Ready}, + protocol_controller::{self, ProtoSetConfig, ProtocolController, SetId}, request_responses::{IfDisconnected, RequestFailure}, service::{ signature::{Signature, SigningError}, @@ -73,7 +75,6 @@ use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; use sc_network_common::ExHashT; -use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::Block as BlockT; @@ -106,18 +107,15 @@ pub struct NetworkService { /// Number of peers we're connected to. num_connected: Arc, /// The local external addresses. - external_addresses: Arc>>, + external_addresses: Arc>>, /// Listen addresses. Do **NOT** include a trailing `/p2p/` with our `PeerId`. - listen_addresses: Arc>>, + listen_addresses: Arc>>, /// Local copy of the `PeerId` of the local node. local_peer_id: PeerId, /// The `KeyPair` that defines the `PeerId` of the local node. local_identity: Keypair, /// Bandwidth logging system. Can be queried to know the average bandwidth consumed. bandwidth: Arc, - /// Peerset manager (PSM); manages the reputation of nodes and indicates the network which - /// nodes it should be connected to or not. 
- peerset: PeersetHandle, /// Channel that sends messages to the actual worker. to_worker: TracingUnboundedSender, /// For each peer and protocol combination, an object that allows sending notifications to @@ -126,6 +124,14 @@ pub struct NetworkService { /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. notifications_sizes_metric: Option, + /// Protocol name -> `SetId` mapping for notification protocols. The map never changes after + /// initialization. + notification_protocol_ids: HashMap, + /// Handles to manage peer connections on notification protocols. The vector never changes + /// after initialization. + protocol_handles: Vec, + /// Shortcut to sync protocol handle (`protocol_handles[0]`). + sync_protocol_handle: protocol_controller::ProtocolHandle, /// Marker to pin the `H` generic. Serves no purpose except to not break backwards /// compatibility. _marker: PhantomData, @@ -262,24 +268,93 @@ where ) }; - let (protocol, peerset_handle, mut known_addresses) = Protocol::new( + let (to_notifications, from_protocol_controllers) = + tracing_unbounded("mpsc_protocol_controllers_to_notifications", 10_000); + + // We must prepend a hardcoded default peer set to notification protocols. 
+ let all_peer_sets_iter = iter::once(&network_config.default_peers_set) + .chain(notification_protocols.iter().map(|protocol| &protocol.set_config)); + + let (protocol_handles, protocol_controllers): (Vec<_>, Vec<_>) = all_peer_sets_iter + .enumerate() + .map(|(set_id, set_config)| { + let proto_set_config = ProtoSetConfig { + in_peers: set_config.in_peers, + out_peers: set_config.out_peers, + reserved_nodes: set_config + .reserved_nodes + .iter() + .map(|node| node.peer_id) + .collect(), + reserved_only: set_config.non_reserved_mode.is_reserved_only(), + }; + + ProtocolController::new( + SetId::from(set_id), + proto_set_config, + to_notifications.clone(), + Box::new(params.peer_store.clone()), + ) + }) + .unzip(); + + // Shortcut to default (sync) peer set protocol handle. + let sync_protocol_handle = protocol_handles[0].clone(); + + // Spawn `ProtocolController` runners. + protocol_controllers + .into_iter() + .for_each(|controller| (params.executor)(controller.run().boxed())); + + // Protocol name to protocol id mapping. The first protocol is always block announce (sync) + // protocol, aka default (hardcoded) peer set. + let notification_protocol_ids: HashMap = + iter::once(¶ms.block_announce_config) + .chain(notification_protocols.iter()) + .enumerate() + .map(|(index, protocol)| { + (protocol.notifications_protocol.clone(), SetId::from(index)) + }) + .collect(); + + let protocol = Protocol::new( From::from(¶ms.role), - &network_config, - notification_protocols, + notification_protocols.clone(), params.block_announce_config, + params.peer_store.clone(), + protocol_handles.clone(), + from_protocol_controllers, params.tx, )?; - // List of multiaddresses that we know in the network. - let mut boot_node_ids = HashSet::new(); + let known_addresses = { + // Collect all reserved nodes and bootnodes addresses. 
+ let mut addresses: Vec<_> = network_config + .default_peers_set + .reserved_nodes + .iter() + .map(|reserved| (reserved.peer_id, reserved.multiaddr.clone())) + .chain(notification_protocols.iter().flat_map(|protocol| { + protocol + .set_config + .reserved_nodes + .iter() + .map(|reserved| (reserved.peer_id, reserved.multiaddr.clone())) + })) + .chain( + network_config + .boot_nodes + .iter() + .map(|bootnode| (bootnode.peer_id, bootnode.multiaddr.clone())), + ) + .collect(); - // Process the bootnodes. - for bootnode in network_config.boot_nodes.iter() { - boot_node_ids.insert(bootnode.peer_id); - known_addresses.push((bootnode.peer_id, bootnode.multiaddr.clone())); - } + // Remove possible duplicates. + addresses.sort(); + addresses.dedup(); - let boot_node_ids = Arc::new(boot_node_ids); + addresses + }; // Check for duplicate bootnodes. network_config.boot_nodes.iter().try_for_each(|bootnode| { @@ -299,7 +374,20 @@ where } })?; + // List of bootnode multiaddresses. + let mut boot_node_ids = HashMap::>::new(); + + for bootnode in network_config.boot_nodes.iter() { + boot_node_ids + .entry(bootnode.peer_id) + .or_default() + .push(bootnode.multiaddr.clone()); + } + + let boot_node_ids = Arc::new(boot_node_ids); + let num_connected = Arc::new(AtomicUsize::new(0)); + let external_addresses = Arc::new(Mutex::new(HashSet::new())); // Build the swarm. 
let (mut swarm, bandwidth): (Swarm>, _) = { @@ -320,6 +408,7 @@ where config.use_kademlia_disjoint_query_paths( network_config.kademlia_disjoint_query_paths, ); + config.with_kademlia_replication_factor(network_config.kademlia_replication_factor); match network_config.transport { TransportConfig::MemoryOnly => { @@ -346,7 +435,8 @@ where local_public, discovery_config, request_response_protocols, - peerset_handle.clone(), + params.peer_store.clone(), + external_addresses.clone(), ); match result { @@ -417,16 +507,14 @@ where ); } - let external_addresses = Arc::new(Mutex::new(Vec::new())); - let listen_addresses = Arc::new(Mutex::new(Vec::new())); + let listen_addresses = Arc::new(Mutex::new(HashSet::new())); let peers_notifications_sinks = Arc::new(Mutex::new(HashMap::new())); let service = Arc::new(NetworkService { bandwidth, - external_addresses: external_addresses.clone(), + external_addresses, listen_addresses: listen_addresses.clone(), num_connected: num_connected.clone(), - peerset: peerset_handle, local_peer_id, local_identity, to_worker, @@ -434,21 +522,25 @@ where notifications_sizes_metric: metrics .as_ref() .map(|metrics| metrics.notifications_sizes.clone()), + notification_protocol_ids, + protocol_handles, + sync_protocol_handle, _marker: PhantomData, _block: Default::default(), }); Ok(NetworkWorker { - external_addresses, listen_addresses, num_connected, network_service: swarm, service, from_service, event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, - peers_notifications_sinks, metrics, boot_node_ids, + reported_invalid_boot_nodes: Default::default(), + peers_notifications_sinks, + peer_store_handle: params.peer_store, _marker: Default::default(), _block: Default::default(), }) @@ -605,7 +697,11 @@ where external_addresses, connected_peers, not_connected_peers, - peerset: swarm.behaviour_mut().user_protocol_mut().peerset_debug_info(), + // TODO: Check what info we can include here. 
+ // Issue reference: https://github.com/paritytech/substrate/issues/14160. + peerset: serde_json::json!( + "Unimplemented. See https://github.com/paritytech/substrate/issues/14160." + ), } } @@ -618,11 +714,6 @@ where pub fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> { self.service.add_reserved_peer(peer) } - - /// Returns the list of reserved peers. - pub fn reserved_peers(&self) -> impl Iterator { - self.network_service.behaviour().user_protocol().reserved_peers() - } } impl NetworkService { @@ -652,11 +743,9 @@ impl NetworkService { pub async fn reserved_peers(&self) -> Result, ()> { let (tx, rx) = oneshot::channel(); - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::ReservedPeers { pending_response: tx }); + self.sync_protocol_handle.reserved_peers(tx); - // The channel can only be closed if the network worker no longer exists. + // The channel can only be closed if `ProtocolController` no longer exists. rx.await.map_err(|_| ()) } @@ -696,12 +785,12 @@ where { /// Returns the local external addresses. fn external_addresses(&self) -> Vec { - self.external_addresses.lock().clone() + self.external_addresses.lock().iter().cloned().collect() } /// Returns the listener addresses (without trailing `/p2p/` with our `PeerId`). fn listen_addresses(&self) -> Vec { - self.listen_addresses.lock().clone() + self.listen_addresses.lock().iter().cloned().collect() } /// Returns the local Peer ID. 
@@ -769,13 +858,11 @@ where H: ExHashT, { fn set_authorized_peers(&self, peers: HashSet) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReserved(peers)); + self.sync_protocol_handle.set_reserved_peers(peers); } fn set_authorized_only(&self, reserved_only: bool) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only)); + self.sync_protocol_handle.set_reserved_only(reserved_only); } fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { @@ -785,7 +872,7 @@ where } fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - self.peerset.report_peer(who, cost_benefit); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::ReportPeer(who, cost_benefit)); } fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { @@ -793,15 +880,15 @@ where } fn accept_unreserved_peers(&self) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(false)); + self.sync_protocol_handle.set_reserved_only(false); } fn deny_unreserved_peers(&self) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(true)); + self.sync_protocol_handle.set_reserved_only(true); } fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> { - // Make sure the local peer ID is never added to the PSM. + // Make sure the local peer ID is never added as a reserved peer. 
if peer.peer_id == self.local_peer_id { return Err("Local peer ID cannot be added as a reserved peer.".to_string()) } @@ -809,12 +896,12 @@ where let _ = self .to_worker .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer.peer_id, peer.multiaddr)); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AddReserved(peer.peer_id)); + self.sync_protocol_handle.add_reserved_peer(peer.peer_id); Ok(()) } fn remove_reserved_peer(&self, peer_id: PeerId) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RemoveReserved(peer_id)); + self.sync_protocol_handle.remove_reserved_peer(peer_id); } fn set_reserved_peers( @@ -822,6 +909,10 @@ where protocol: ProtocolName, peers: HashSet, ) -> Result<(), String> { + let Some(set_id) = self.notification_protocol_ids.get(&protocol) else { + return Err(format!("Cannot set reserved peers for unknown protocol: {}", protocol)) + }; + let peers_addrs = self.split_multiaddr_and_peer_id(peers)?; let mut peers: HashSet = HashSet::with_capacity(peers_addrs.len()); @@ -841,9 +932,7 @@ where } } - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::SetPeersetReserved(protocol, peers)); + self.protocol_handles[usize::from(*set_id)].set_reserved_peers(peers); Ok(()) } @@ -853,6 +942,12 @@ where protocol: ProtocolName, peers: HashSet, ) -> Result<(), String> { + let Some(set_id) = self.notification_protocol_ids.get(&protocol) else { + return Err( + format!("Cannot add peers to reserved set of unknown protocol: {}", protocol) + ) + }; + let peers = self.split_multiaddr_and_peer_id(peers)?; for (peer_id, addr) in peers.into_iter() { @@ -866,56 +961,31 @@ where .to_worker .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); } - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::AddSetReserved(protocol.clone(), peer_id)); + + self.protocol_handles[usize::from(*set_id)].add_reserved_peer(peer_id); } Ok(()) } - fn remove_peers_from_reserved_set(&self, protocol: ProtocolName, peers: Vec) { - for 
peer_id in peers.into_iter() { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::RemoveSetReserved(protocol.clone(), peer_id)); - } - } - - fn add_to_peers_set( + fn remove_peers_from_reserved_set( &self, protocol: ProtocolName, - peers: HashSet, + peers: Vec, ) -> Result<(), String> { - let peers = self.split_multiaddr_and_peer_id(peers)?; - - for (peer_id, addr) in peers.into_iter() { - // Make sure the local peer ID is never added to the PSM. - if peer_id == self.local_peer_id { - return Err("Local peer ID cannot be added as a reserved peer.".to_string()) - } + let Some(set_id) = self.notification_protocol_ids.get(&protocol) else { + return Err( + format!("Cannot remove peers from reserved set of unknown protocol: {}", protocol) + ) + }; - if !addr.is_empty() { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); - } - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::AddToPeersSet(protocol.clone(), peer_id)); + for peer_id in peers.into_iter() { + self.protocol_handles[usize::from(*set_id)].remove_reserved_peer(peer_id); } Ok(()) } - fn remove_from_peers_set(&self, protocol: ProtocolName, peers: Vec) { - for peer_id in peers.into_iter() { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::RemoveFromPeersSet(protocol.clone(), peer_id)); - } - } - fn sync_num_connected(&self) -> usize { self.num_connected.load(Ordering::Relaxed) } @@ -1121,15 +1191,7 @@ enum ServiceToWorkerMsg { GetValue(KademliaKey), PutValue(KademliaKey, Vec), AddKnownAddress(PeerId, Multiaddr), - SetReservedOnly(bool), - AddReserved(PeerId), - RemoveReserved(PeerId), - SetReserved(HashSet), - SetPeersetReserved(ProtocolName, HashSet), - AddSetReserved(ProtocolName, PeerId), - RemoveSetReserved(ProtocolName, PeerId), - AddToPeersSet(ProtocolName, PeerId), - RemoveFromPeersSet(ProtocolName, PeerId), + ReportPeer(PeerId, ReputationChange), EventStream(out_events::Sender), Request { target: PeerId, @@ 
-1146,9 +1208,6 @@ enum ServiceToWorkerMsg { }, DisconnectPeer(PeerId, ProtocolName), SetNotificationHandshake(ProtocolName, Vec), - ReservedPeers { - pending_response: oneshot::Sender>, - }, } /// Main network worker. Must be polled in order for the network to advance. @@ -1161,9 +1220,7 @@ where H: ExHashT, { /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. - external_addresses: Arc>>, - /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. - listen_addresses: Arc>>, + listen_addresses: Arc>>, /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. num_connected: Arc, /// The network service that can be extracted and shared through the codebase. @@ -1176,11 +1233,15 @@ where event_streams: out_events::OutChannels, /// Prometheus network metrics. metrics: Option, - /// The `PeerId`'s of all boot nodes. - boot_node_ids: Arc>, + /// The `PeerId`'s of all boot nodes mapped to the registered addresses. + boot_node_ids: Arc>>, + /// Boot nodes that we already have reported as invalid. + reported_invalid_boot_nodes: HashSet, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Shared with the [`NetworkService`]. peers_notifications_sinks: Arc>>, + /// Peer reputation store handle. + peer_store_handle: PeerStoreHandle, /// Marker to pin the `H` generic. Serves no purpose except to not break backwards /// compatibility. _marker: PhantomData, @@ -1218,19 +1279,10 @@ where }, }; - // Update the variables shared with the `NetworkService`. + // Update the `num_connected` count shared with the `NetworkService`. 
let num_connected_peers = self.network_service.behaviour_mut().user_protocol_mut().num_connected_peers(); self.num_connected.store(num_connected_peers, Ordering::Relaxed); - { - let external_addresses = - self.network_service.external_addresses().map(|r| &r.addr).cloned().collect(); - *self.external_addresses.lock() = external_addresses; - - let listen_addresses = - self.network_service.listeners().map(ToOwned::to_owned).collect(); - *self.listen_addresses.lock() = listen_addresses; - } if let Some(metrics) = self.metrics.as_ref() { if let Some(buckets) = self.network_service.behaviour_mut().num_entries_per_kbucket() { @@ -1251,8 +1303,7 @@ where } metrics .peerset_num_discovered - .set(self.network_service.behaviour_mut().user_protocol().num_discovered_peers() - as u64); + .set(self.peer_store_handle.num_known_peers() as u64); metrics.pending_connections.set( Swarm::network_info(&self.network_service).connection_counters().num_pending() as u64, @@ -1269,53 +1320,10 @@ where self.network_service.behaviour_mut().get_value(key), ServiceToWorkerMsg::PutValue(key, value) => self.network_service.behaviour_mut().put_value(key, value), - ServiceToWorkerMsg::SetReservedOnly(reserved_only) => self - .network_service - .behaviour_mut() - .user_protocol_mut() - .set_reserved_only(reserved_only), - ServiceToWorkerMsg::SetReserved(peers) => self - .network_service - .behaviour_mut() - .user_protocol_mut() - .set_reserved_peers(peers), - ServiceToWorkerMsg::SetPeersetReserved(protocol, peers) => self - .network_service - .behaviour_mut() - .user_protocol_mut() - .set_reserved_peerset_peers(protocol, peers), - ServiceToWorkerMsg::AddReserved(peer_id) => self - .network_service - .behaviour_mut() - .user_protocol_mut() - .add_reserved_peer(peer_id), - ServiceToWorkerMsg::RemoveReserved(peer_id) => self - .network_service - .behaviour_mut() - .user_protocol_mut() - .remove_reserved_peer(peer_id), - ServiceToWorkerMsg::AddSetReserved(protocol, peer_id) => self - .network_service - 
.behaviour_mut() - .user_protocol_mut() - .add_set_reserved_peer(protocol, peer_id), - ServiceToWorkerMsg::RemoveSetReserved(protocol, peer_id) => self - .network_service - .behaviour_mut() - .user_protocol_mut() - .remove_set_reserved_peer(protocol, peer_id), ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => self.network_service.behaviour_mut().add_known_address(peer_id, addr), - ServiceToWorkerMsg::AddToPeersSet(protocol, peer_id) => self - .network_service - .behaviour_mut() - .user_protocol_mut() - .add_to_peers_set(protocol, peer_id), - ServiceToWorkerMsg::RemoveFromPeersSet(protocol, peer_id) => self - .network_service - .behaviour_mut() - .user_protocol_mut() - .remove_from_peers_set(protocol, peer_id), + ServiceToWorkerMsg::ReportPeer(peer_id, reputation_change) => + self.peer_store_handle.report_peer(peer_id, reputation_change), ServiceToWorkerMsg::EventStream(sender) => self.event_streams.push(sender), ServiceToWorkerMsg::Request { target, @@ -1348,10 +1356,6 @@ where .behaviour_mut() .user_protocol_mut() .set_notification_handshake(protocol, handshake), - ServiceToWorkerMsg::ReservedPeers { pending_response } => { - let _ = - pending_response.send(self.reserved_peers().map(ToOwned::to_owned).collect()); - }, } } @@ -1431,7 +1435,7 @@ where }, SwarmEvent::Behaviour(BehaviourOut::ReputationChanges { peer, changes }) => { for change in changes { - self.network_service.behaviour().user_protocol().report_peer(peer, change); + self.peer_store_handle.report_peer(peer, change); } }, SwarmEvent::Behaviour(BehaviourOut::PeerIdentify { @@ -1454,16 +1458,10 @@ where .behaviour_mut() .add_self_reported_address_to_dht(&peer_id, &protocols, addr); } - self.network_service - .behaviour_mut() - .user_protocol_mut() - .add_default_set_discovered_nodes(iter::once(peer_id)); + self.peer_store_handle.add_known_peer(peer_id); }, SwarmEvent::Behaviour(BehaviourOut::Discovered(peer_id)) => { - self.network_service - .behaviour_mut() - .user_protocol_mut() - 
.add_default_set_discovered_nodes(iter::once(peer_id)); + self.peer_store_handle.add_known_peer(peer_id); }, SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted) => { if let Some(metrics) = self.metrics.as_ref() { @@ -1637,12 +1635,14 @@ where if let Some(metrics) = self.metrics.as_ref() { metrics.listeners_local_addresses.inc(); } + self.listen_addresses.lock().insert(address.clone()); }, SwarmEvent::ExpiredListenAddr { address, .. } => { info!(target: "sub-libp2p", "📪 No longer listening on {}", address); if let Some(metrics) = self.metrics.as_ref() { metrics.listeners_local_addresses.dec(); } + self.listen_addresses.lock().remove(&address); }, SwarmEvent::OutgoingConnectionError { peer_id, error } => { if let Some(peer_id) = peer_id { @@ -1652,15 +1652,27 @@ where peer_id, error, ); - if self.boot_node_ids.contains(&peer_id) { + let not_reported = !self.reported_invalid_boot_nodes.contains(&peer_id); + + if let Some(addresses) = + not_reported.then(|| self.boot_node_ids.get(&peer_id)).flatten() + { if let DialError::WrongPeerId { obtained, endpoint } = &error { if let ConnectedPoint::Dialer { address, role_override: _ } = endpoint { - warn!( - "💔 The bootnode you want to connect to at `{}` provided a different peer ID `{}` than the one you expect `{}`.", - address, - obtained, - peer_id, - ); + let address_without_peer_id = parse_addr(address.clone()) + .map_or_else(|_| address.clone(), |r| r.1); + + // Only report for address of boot node that was added at startup of + // the node and not for any address that the node learned of the + // boot node. 
+ if addresses.iter().any(|a| address_without_peer_id == *a) { + warn!( + "💔 The bootnode you want to connect to at `{address}` provided a \ + different peer ID `{obtained}` than the one you expect `{peer_id}`.", + ); + + self.reported_invalid_boot_nodes.insert(peer_id); + } } } } @@ -1745,6 +1757,12 @@ where if let Some(metrics) = self.metrics.as_ref() { metrics.listeners_local_addresses.sub(addresses.len() as u64); } + let mut listen_addresses = self.listen_addresses.lock(); + for addr in &addresses { + listen_addresses.remove(addr); + } + drop(listen_addresses); + let addrs = addresses.into_iter().map(|a| a.to_string()).collect::>().join(", "); match reason { diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index 398c26793fd41..ededccd5e3233 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -34,37 +34,48 @@ use crate::event::Event; use futures::{prelude::*, ready, stream::FusedStream}; -use log::error; -use parking_lot::Mutex; +use log::{debug, error}; use prometheus_endpoint::{register, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64}; use std::{ backtrace::Backtrace, cell::RefCell, fmt, pin::Pin, - sync::Arc, task::{Context, Poll}, }; +/// Log target for this file. +pub const LOG_TARGET: &str = "sub-libp2p::out_events"; + /// Creates a new channel that can be associated to a [`OutChannels`]. /// /// The name is used in Prometheus reports, the queue size threshold is used /// to warn if there are too many unprocessed events in the channel. 
pub fn channel(name: &'static str, queue_size_warning: usize) -> (Sender, Receiver) { let (tx, rx) = async_channel::unbounded(); - let metrics = Arc::new(Mutex::new(None)); let tx = Sender { inner: tx, name, queue_size_warning, - warning_fired: false, + warning_fired: SenderWarningState::NotFired, creation_backtrace: Backtrace::force_capture(), - metrics: metrics.clone(), + metrics: None, }; - let rx = Receiver { inner: rx, name, metrics }; + let rx = Receiver { inner: rx, name, metrics: None }; (tx, rx) } +/// A state of a sender warning that is used to avoid spamming the logs. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum SenderWarningState { + /// The warning has not been fired yet. + NotFired, + /// The warning has been fired, and the channel is full + FiredFull, + /// The warning has been fired and the channel is not full anymore. + FiredFree, +} + /// Sending side of a channel. /// /// Must be associated with an [`OutChannels`] before anything can be sent on it @@ -78,13 +89,14 @@ pub struct Sender { name: &'static str, /// Threshold queue size to generate an error message in the logs. queue_size_warning: usize, - /// We generate the error message only once to not spam the logs. - warning_fired: bool, + /// We generate the error message only once to not spam the logs after the first error. + /// Subsequently we indicate channel fullness on debug level. + warning_fired: SenderWarningState, /// Backtrace of a place where the channel was created. creation_backtrace: Backtrace, /// Clone of [`Receiver::metrics`]. Will be initialized when [`Sender`] is added to /// [`OutChannels`] with `OutChannels::push()`. 
- metrics: Arc>>>>, + metrics: Option, } impl fmt::Debug for Sender { @@ -95,8 +107,7 @@ impl fmt::Debug for Sender { impl Drop for Sender { fn drop(&mut self) { - let metrics = self.metrics.lock(); - if let Some(Some(metrics)) = metrics.as_ref().map(|m| &**m) { + if let Some(metrics) = self.metrics.as_ref() { metrics.num_channels.with_label_values(&[self.name]).dec(); } } @@ -108,7 +119,7 @@ pub struct Receiver { name: &'static str, /// Initially contains `None`, and will be set to a value once the corresponding [`Sender`] /// is assigned to an instance of [`OutChannels`]. - metrics: Arc>>>>, + metrics: Option, } impl Stream for Receiver { @@ -116,13 +127,8 @@ impl Stream for Receiver { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { if let Some(ev) = ready!(Pin::new(&mut self.inner).poll_next(cx)) { - let metrics = self.metrics.lock().clone(); - match metrics.as_ref().map(|m| m.as_ref()) { - Some(Some(metrics)) => metrics.event_out(&ev, self.name), - Some(None) => (), // no registry - None => log::warn!( - "Inconsistency in out_events: event happened before sender associated" - ), + if let Some(metrics) = &self.metrics { + metrics.event_out(&ev, self.name); } Poll::Ready(Some(ev)) } else { @@ -151,7 +157,7 @@ pub struct OutChannels { event_streams: Vec, /// The metrics we collect. A clone of this is sent to each [`Receiver`] associated with this /// object. - metrics: Arc>, + metrics: Option, } impl OutChannels { @@ -160,17 +166,15 @@ impl OutChannels { let metrics = if let Some(registry) = registry { Some(Metrics::register(registry)?) } else { None }; - Ok(Self { event_streams: Vec::new(), metrics: Arc::new(metrics) }) + Ok(Self { event_streams: Vec::new(), metrics }) } /// Adds a new [`Sender`] to the collection. 
- pub fn push(&mut self, sender: Sender) { - let mut metrics = sender.metrics.lock(); - debug_assert!(metrics.is_none()); - *metrics = Some(self.metrics.clone()); - drop(metrics); + pub fn push(&mut self, mut sender: Sender) { + debug_assert!(sender.metrics.is_none()); + sender.metrics = self.metrics.clone(); - if let Some(metrics) = &*self.metrics { + if let Some(metrics) = &self.metrics { metrics.num_channels.with_label_values(&[sender.name]).inc(); } @@ -180,22 +184,42 @@ impl OutChannels { /// Sends an event. pub fn send(&mut self, event: Event) { self.event_streams.retain_mut(|sender| { - if sender.inner.len() >= sender.queue_size_warning && !sender.warning_fired { - sender.warning_fired = true; - error!( - "The number of unprocessed events in channel `{}` exceeded {}.\n\ - The channel was created at:\n{:}\n - The last event was sent from:\n{:}", - sender.name, - sender.queue_size_warning, - sender.creation_backtrace, - Backtrace::force_capture(), + let current_pending = sender.inner.len(); + if current_pending >= sender.queue_size_warning { + if sender.warning_fired == SenderWarningState::NotFired { + error!( + "The number of unprocessed events in channel `{}` exceeded {}.\n\ + The channel was created at:\n{:}\n + The last event was sent from:\n{:}", + sender.name, + sender.queue_size_warning, + sender.creation_backtrace, + Backtrace::force_capture(), + ); + } else if sender.warning_fired == SenderWarningState::FiredFree { + // We don't want to spam the logs, so we only log on debug level + debug!( + target: LOG_TARGET, + "Channel `{}` is overflowed again. Number of events: {}", + sender.name, current_pending + ); + } + sender.warning_fired = SenderWarningState::FiredFull; + } else if sender.warning_fired == SenderWarningState::FiredFull && + current_pending < sender.queue_size_warning.wrapping_div(2) + { + sender.warning_fired = SenderWarningState::FiredFree; + debug!( + target: LOG_TARGET, + "Channel `{}` is no longer overflowed. 
Number of events: {}", + sender.name, current_pending ); } + sender.inner.try_send(event.clone()).is_ok() }); - if let Some(metrics) = &*self.metrics { + if let Some(metrics) = &self.metrics { for ev in &self.event_streams { metrics.event_in(&event, ev.name); } @@ -211,6 +235,7 @@ impl fmt::Debug for OutChannels { } } +#[derive(Clone)] struct Metrics { // This list is ordered alphabetically events_total: CounterVec, diff --git a/client/network/src/service/traits.rs b/client/network/src/service/traits.rs index 787ef4b5ae445..bed325ede4a85 100644 --- a/client/network/src/service/traits.rs +++ b/client/network/src/service/traits.rs @@ -24,13 +24,12 @@ use crate::{ request_responses::{IfDisconnected, RequestFailure}, service::signature::Signature, types::ProtocolName, + ReputationChange, }; use futures::{channel::oneshot, Stream}; use libp2p::{Multiaddr, PeerId}; -use sc_peerset::ReputationChange; - use std::{collections::HashSet, future::Future, pin::Pin, sync::Arc}; pub use libp2p::{identity::SigningError, kad::record::Key as KademliaKey}; @@ -156,10 +155,6 @@ pub trait NetworkPeers { /// Disconnect from a node as soon as possible. /// /// This triggers the same effects as if the connection had closed itself spontaneously. - /// - /// See also [`NetworkPeers::remove_from_peers_set`], which has the same effect but also - /// prevents the local node from re-establishing an outgoing substream to this peer until it - /// is added again. fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName); /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. @@ -193,7 +188,8 @@ pub trait NetworkPeers { /// this step if the peer set is in reserved only mode. /// /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). + /// invalid peer ID (which includes the local peer ID), or if `protocol` does not + /// refer to a known protocol. 
fn set_reserved_peers( &self, protocol: ProtocolName, @@ -206,7 +202,8 @@ pub trait NetworkPeers { /// consist of only `/p2p/`. /// /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). + /// invalid peer ID (which includes the local peer ID), or if `protocol` does not + /// refer to a known protocol. fn add_peers_to_reserved_set( &self, protocol: ProtocolName, @@ -214,28 +211,14 @@ pub trait NetworkPeers { /// Remove peers from a peer set. - fn remove_peers_from_reserved_set(&self, protocol: ProtocolName, peers: Vec); - - /// Add a peer to a set of peers. - /// - /// If the set has slots available, it will try to open a substream with this peer. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also - /// consist of only `/p2p/`. /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - fn add_to_peers_set( + /// Returns `Err` if `protocol` does not refer to a known protocol. + fn remove_peers_from_reserved_set( &self, protocol: ProtocolName, - peers: HashSet, + peers: Vec, ) -> Result<(), String>; - /// Remove peers from a peer set. - /// - /// If we currently have an open substream with this peer, it will soon be closed. - fn remove_from_peers_set(&self, protocol: ProtocolName, peers: Vec); - /// Returns the number of peers in the sync peer set we're connected to. fn sync_num_connected(&self) -> usize; } @@ -259,6 +242,10 @@ where } fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + // TODO: when we get rid of `Peerset`, we'll likely need to add some kind of async + // interface to `PeerStore`, otherwise we'll have trouble calling functions accepting + // `&mut self` via `Arc`. + // See https://github.com/paritytech/substrate/issues/14170. 
T::report_peer(self, who, cost_benefit) } @@ -298,20 +285,12 @@ where T::add_peers_to_reserved_set(self, protocol, peers) } - fn remove_peers_from_reserved_set(&self, protocol: ProtocolName, peers: Vec) { - T::remove_peers_from_reserved_set(self, protocol, peers) - } - - fn add_to_peers_set( + fn remove_peers_from_reserved_set( &self, protocol: ProtocolName, - peers: HashSet, + peers: Vec, ) -> Result<(), String> { - T::add_to_peers_set(self, protocol, peers) - } - - fn remove_from_peers_set(&self, protocol: ProtocolName, peers: Vec) { - T::remove_from_peers_set(self, protocol, peers) + T::remove_peers_from_reserved_set(self, protocol, peers) } fn sync_num_connected(&self) -> usize { @@ -435,9 +414,9 @@ pub trait NetworkNotification { /// a receiver. With a `NotificationSender` at hand, sending a notification is done in two /// steps: /// - /// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready + /// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready /// for another notification, yielding a [`NotificationSenderReady`] token. - /// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation + /// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation /// can only fail if the underlying notification substream or connection has suddenly closed. 
/// /// An error is returned by [`NotificationSenderReady::send`] if there exists no open diff --git a/client/network/statement/Cargo.toml b/client/network/statement/Cargo.toml index a81e6a916c05c..63377710dc403 100644 --- a/client/network/statement/Cargo.toml +++ b/client/network/statement/Cargo.toml @@ -13,17 +13,14 @@ documentation = "https://docs.rs/sc-network-statement" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" +array-bytes = "6.1" async-channel = "1.8.0" -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" libp2p = "0.51.3" log = "0.4.17" -pin-project = "1.0.12" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } sc-network-common = { version = "0.10.0-dev", path = "../common" } sc-network = { version = "0.10.0-dev", path = "../" } -sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-statement-store = { version = "4.0.0-dev", path = "../../../primitives/statement-store" } diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index 02cbab27a6a15..c5d83b59b260a 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -62,7 +62,7 @@ pub type Statements = Vec; pub type StatementImportFuture = oneshot::Receiver; mod rep { - use sc_peerset::ReputationChange as Rep; + use sc_network::ReputationChange as Rep; /// Reputation change when a peer sends us any statement. /// /// This forces node to verify it, thus the negative value here. 
Once statement is verified, @@ -297,10 +297,13 @@ where } }, SyncEvent::PeerDisconnected(remote) => { - self.network.remove_peers_from_reserved_set( + let result = self.network.remove_peers_from_reserved_set( self.protocol_name.clone(), iter::once(remote).collect(), ); + if let Err(err) = result { + log::error!(target: LOG_TARGET, "Failed to remove reserved peer: {err}"); + } }, } } diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index ce713596011fb..8c02a4f1ec15f 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -16,17 +16,18 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.11" [dependencies] -array-bytes = "4.1" +array-bytes = "6.1" +async-channel = "1.8.0" async-trait = "0.1.58" -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.2" libp2p = "0.51.3" log = "0.4.17" -lru = "0.8.1" mockall = "0.11.3" prost = "0.11" -smallvec = "1.8.0" +schnellru = "0.2.1" +smallvec = "1.11.0" thiserror = "1.0" fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } @@ -34,19 +35,18 @@ sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-network = { version = "0.10.0-dev", path = "../" } sc-network-common = { version = "0.10.0-dev", path = "../common" } -sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } sc-utils = { version = "4.0.0-dev", path = "../../utils" } -sp-arithmetic = { version = "6.0.0", path = "../../../primitives/arithmetic" } +sp-arithmetic = { version = "16.0.0", path = "../../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { 
version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-consensus-grandpa = { version = "4.0.0-dev", path = "../../../primitives/consensus/grandpa" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } [dev-dependencies] tokio = { version = "1.22.0", features = ["macros"] } quickcheck = { version = "1.0.3", default-features = false } sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sp-test-primitives = { version = "2.0.0", path = "../../../primitives/test-primitives" } -sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index 256c0ad382b92..291157eae4b07 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -23,14 +23,11 @@ use crate::{ }; use codec::{Decode, Encode}; -use futures::{ - channel::{mpsc, oneshot}, - stream::StreamExt, -}; +use futures::{channel::oneshot, stream::StreamExt}; use libp2p::PeerId; use log::debug; -use lru::LruCache; use prost::Message; +use schnellru::{ByLength, LruMap}; use sc_client_api::BlockBackend; use sc_network::{ @@ -47,7 +44,6 @@ use sp_runtime::{ use std::{ cmp::min, hash::{Hash, Hasher}, - num::NonZeroUsize, sync::Arc, time::Duration, }; @@ -57,7 +53,7 @@ const MAX_BODY_BYTES: usize = 8 * 1024 * 1024; const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2; mod rep { - use sc_peerset::ReputationChange as Rep; + use sc_network::ReputationChange as Rep; /// Reputation change when a peer 
sent us the same request multiple times. pub const SAME_REQUEST: Rep = Rep::new_fatal("Same block request multiple times"); @@ -136,11 +132,11 @@ enum SeenRequestsValue { /// Handler for incoming block requests from a remote peer. pub struct BlockRequestHandler { client: Arc, - request_receiver: mpsc::Receiver, + request_receiver: async_channel::Receiver, /// Maps from request to number of times we have seen this request. /// /// This is used to check if a peer is spamming us with the same request. - seen_requests: LruCache, SeenRequestsValue>, + seen_requests: LruMap, SeenRequestsValue>, } impl BlockRequestHandler @@ -157,7 +153,8 @@ where ) -> (Self, ProtocolConfig) { // Reserve enough request slots for one request per peer when we are at the maximum // number of peers. - let (tx, request_receiver) = mpsc::channel(num_peer_hint); + let capacity = std::cmp::max(num_peer_hint, 1); + let (tx, request_receiver) = async_channel::bounded(capacity); let mut protocol_config = generate_protocol_config( protocol_id, @@ -170,9 +167,8 @@ where ); protocol_config.inbound_queue = Some(tx); - let capacity = - NonZeroUsize::new(num_peer_hint.max(1) * 2).expect("cache capacity is not zero"); - let seen_requests = LruCache::new(capacity); + let capacity = ByLength::new(num_peer_hint.max(1) as u32 * 2); + let seen_requests = LruMap::new(capacity); (Self { client, request_receiver, seen_requests }, protocol_config) } @@ -239,7 +235,7 @@ where .difference(BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION) .is_empty(); - match self.seen_requests.get_mut(&key) { + match self.seen_requests.get(&key) { Some(SeenRequestsValue::First) => {}, Some(SeenRequestsValue::Fulfilled(ref mut requests)) => { *requests = requests.saturating_add(1); @@ -253,7 +249,7 @@ where } }, None => { - self.seen_requests.put(key.clone(), SeenRequestsValue::First); + self.seen_requests.insert(key.clone(), SeenRequestsValue::First); }, } @@ -280,7 +276,7 @@ where .iter() .any(|b| !b.header.is_empty() || 
!b.body.is_empty() || b.is_empty_justification) { - if let Some(value) = self.seen_requests.get_mut(&key) { + if let Some(value) = self.seen_requests.get(&key) { // If this is the first time we have processed this request, we need to change // it to `Fulfilled`. if let SeenRequestsValue::First = value { diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index a6db5a5d54c8c..d5c4957ab3d70 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -28,30 +28,24 @@ use codec::{Decode, Encode}; use futures::{FutureExt, StreamExt}; use futures_timer::Delay; use libp2p::PeerId; -use lru::LruCache; use prometheus_endpoint::{ register, Gauge, GaugeVec, MetricSource, Opts, PrometheusError, Registry, SourcedGauge, U64, }; +use schnellru::{ByLength, LruMap}; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; use sc_consensus::import_queue::ImportQueueService; use sc_network::{ - config::{ - FullNetworkConfiguration, NonDefaultSetConfig, ProtocolId, SyncMode as SyncOperationMode, - }, + config::{FullNetworkConfiguration, NonDefaultSetConfig, ProtocolId}, utils::LruHashSet, - NotificationsSink, ProtocolName, + NotificationsSink, ProtocolName, ReputationChange, }; use sc_network_common::{ role::Roles, sync::{ - message::{ - generic::{BlockData, BlockResponse}, - BlockAnnounce, BlockAnnouncesHandshake, BlockState, - }, + message::{BlockAnnounce, BlockAnnouncesHandshake, BlockState}, warp::WarpSyncParams, BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, PollBlockAnnounceValidation, SyncEvent, - SyncMode, }, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -76,13 +70,25 @@ const TICK_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(1100) /// Maximum number of known block hashes to keep for a peer. 
const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead -/// If the block announces stream to peer has been inactive for two minutes meaning local node +/// If the block announces stream to peer has been inactive for 30 seconds meaning local node /// has not sent or received block announcements to/from the peer, report the node for inactivity, /// disconnect it and attempt to establish connection to some other peer. const INACTIVITY_EVICT_THRESHOLD: Duration = Duration::from_secs(30); +/// When `SyncingEngine` is started, wait two minutes before actually starting to count peers as +/// evicted. +/// +/// Parachain collator may incorrectly get evicted because it's waiting to receive a number of +/// relaychain blocks before it can start creating parachain blocks. During this wait, +/// `SyncingEngine` still counts it as active and as the peer is not sending blocks, it may get +/// evicted if a block is not received within the first 30 seconds since the peer connected. +/// +/// To prevent this from happening, define a threshold for how long `SyncingEngine` should wait +/// before it starts evicting peers. +const INITIAL_EVICTION_WAIT_PERIOD: Duration = Duration::from_secs(2 * 60); + mod rep { - use sc_peerset::ReputationChange as Rep; + use sc_network::ReputationChange as Rep; /// Peer has different genesis. pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); /// Peer send us a block announcement that failed at validation. @@ -168,10 +174,8 @@ pub struct Peer { pub known_blocks: LruHashSet, /// Notification sink. sink: NotificationsSink, - /// Instant when the last notification was sent to peer. - last_notification_sent: Instant, - /// Instant when the last notification was received from peer. - last_notification_received: Instant, + /// Is the peer inbound. + inbound: bool, } pub struct SyncingEngine { @@ -212,9 +216,6 @@ pub struct SyncingEngine { /// All connected peers. Contains both full and light node peers.
peers: HashMap>, - /// Evicted peers - evicted: HashSet, - /// List of nodes for which we perform additional logging because they are important for the /// user. important_peers: HashSet, @@ -232,8 +233,14 @@ pub struct SyncingEngine { /// Number of slots to allocate to light nodes. default_peers_set_num_light: usize, + /// Maximum number of inbound peers. + max_in_peers: usize, + + /// Number of inbound peers accepted so far. + num_in_peers: usize, + /// A cache for the data that was associated to a block announcement. - block_announce_data_cache: LruCache>, + block_announce_data_cache: LruMap>, /// The `PeerId`'s of all boot nodes. boot_node_ids: HashSet, @@ -243,6 +250,15 @@ pub struct SyncingEngine { /// Prometheus metrics. metrics: Option, + + /// When the syncing was started. + /// + /// Stored as an `Option` so once the initial wait has passed, `SyncingEngine` + /// can reset the peer timers and continue with the normal eviction process. + syncing_started: Option, + + /// Instant when the last notification was sent or received. 
+ last_notification_io: Instant, } impl SyncingEngine @@ -272,12 +288,7 @@ where warp_sync_protocol_name: Option, rx: sc_utils::mpsc::TracingUnboundedReceiver>, ) -> Result<(Self, SyncingService, NonDefaultSetConfig), ClientError> { - let mode = match net_config.network_config.sync_mode { - SyncOperationMode::Full => SyncMode::Full, - SyncOperationMode::Fast { skip_proofs, storage_chain_mode } => - SyncMode::LightState { skip_proofs, storage_chain_mode }, - SyncOperationMode::Warp => SyncMode::Warp, - }; + let mode = net_config.network_config.sync_mode; let max_parallel_downloads = net_config.network_config.max_parallel_downloads; let max_blocks_per_request = if net_config.network_config.max_blocks_per_request > crate::MAX_BLOCKS_IN_RESPONSE as u32 @@ -287,12 +298,9 @@ where } else { net_config.network_config.max_blocks_per_request }; - let cache_capacity = NonZeroUsize::new( - (net_config.network_config.default_peers_set.in_peers as usize + - net_config.network_config.default_peers_set.out_peers as usize) - .max(1), - ) - .expect("cache capacity is not zero"); + let cache_capacity = (net_config.network_config.default_peers_set.in_peers + + net_config.network_config.default_peers_set.out_peers) + .max(1); let important_peers = { let mut imp_p = HashSet::new(); for reserved in &net_config.network_config.default_peers_set.reserved_nodes { @@ -366,6 +374,12 @@ where .flatten() .expect("Genesis block exists; qed"); + // `default_peers_set.in_peers` contains an unspecified amount of light peers so the number + // of full inbound peers must be calculated from the total full peer count + let max_full_peers = net_config.network_config.default_peers_set_num_full; + let max_out_peers = net_config.network_config.default_peers_set.out_peers; + let max_in_peers = (max_full_peers - max_out_peers) as usize; + Ok(( Self { roles, @@ -373,8 +387,7 @@ where chain_sync, network_service, peers: HashMap::new(), - evicted: HashSet::new(), - block_announce_data_cache: 
LruCache::new(cache_capacity), + block_announce_data_cache: LruMap::new(ByLength::new(cache_capacity)), block_announce_protocol_name, num_connected: num_connected.clone(), is_major_syncing: is_major_syncing.clone(), @@ -387,8 +400,12 @@ where default_peers_set_no_slot_peers, default_peers_set_num_full, default_peers_set_num_light, + num_in_peers: 0usize, + max_in_peers, event_streams: Vec::new(), tick_timeout: Delay::new(TICK_TIMEOUT), + syncing_started: None, + last_notification_io: Instant::now(), metrics: if let Some(r) = metrics_registry { match Metrics::register(r, is_major_syncing.clone()) { Ok(metrics) => Some(metrics), @@ -450,29 +467,16 @@ where &mut self, validation_result: PollBlockAnnounceValidation, ) { - let (header, _is_best, who) = match validation_result { - PollBlockAnnounceValidation::Skip => return, + match validation_result { + PollBlockAnnounceValidation::Skip => {}, PollBlockAnnounceValidation::Nothing { is_best: _, who, announce } => { self.update_peer_info(&who); if let Some(data) = announce.data { if !data.is_empty() { - self.block_announce_data_cache.put(announce.header.hash(), data); + self.block_announce_data_cache.insert(announce.header.hash(), data); } } - - return - }, - PollBlockAnnounceValidation::ImportHeader { announce, is_best, who } => { - self.update_peer_info(&who); - - if let Some(data) = announce.data { - if !data.is_empty() { - self.block_announce_data_cache.put(announce.header.hash(), data); - } - } - - (announce.header, is_best, who) }, PollBlockAnnounceValidation::Failure { who, disconnect } => { if disconnect { @@ -481,31 +485,8 @@ where } self.network_service.report_peer(who, rep::BAD_BLOCK_ANNOUNCEMENT); - return - }, - }; - - // to import header from announced block let's construct response to request that normally - // would have been sent over network (but it is not in our case) - let blocks_to_import = self.chain_sync.on_block_data( - &who, - None, - BlockResponse { - id: 0, - blocks: vec![BlockData { - hash: 
header.hash(), - header: Some(header), - body: None, - indexed_body: None, - receipt: None, - message_queue: None, - justification: None, - justifications: None, - }], }, - ); - - self.chain_sync.process_block_response_data(blocks_to_import); + } } /// Push a block announce validation. @@ -537,7 +518,6 @@ where }, }; peer.known_blocks.insert(hash); - peer.last_notification_received = Instant::now(); if peer.info.roles.is_full() { let is_best = match announce.state.unwrap_or(BlockState::Best) { @@ -588,7 +568,7 @@ where data: Some(data.clone()), }; - peer.last_notification_sent = Instant::now(); + self.last_notification_io = Instant::now(); peer.sink.send_sync_notification(message.encode()); } } @@ -607,6 +587,8 @@ where } pub async fn run(mut self) { + self.syncing_started = Some(Instant::now()); + loop { futures::future::poll_fn(|cx| self.poll(cx)).await; } @@ -619,36 +601,37 @@ where while let Poll::Ready(()) = self.tick_timeout.poll_unpin(cx) { self.report_metrics(); + self.tick_timeout.reset(TICK_TIMEOUT); - // go over all connected peers and check if any of them have been idle for a while. Idle - // in this case means that we haven't sent or received block announcements to/from this - // peer. If that is the case, because of #5685, it could be that the block announces - // substream is not actually open and and this peer is just wasting a slot and is should - // be replaced with some other node that is willing to send us block announcements. - for (id, peer) in self.peers.iter() { - // because of a delay between disconnecting a peer in `SyncingEngine` and getting - // the response back from `Protocol`, a peer might be reported and disconnect - // multiple times. To prevent this from happening (until the underlying issue is - // fixed), keep track of evicted peers and report and disconnect them only once. 
- if self.evicted.contains(id) { + // if `SyncingEngine` has just started, don't evict seemingly inactive peers right away + // as they may not have produced blocks not because they've disconnected but because + // they're still waiting to receive enough relaychain blocks to start producing blocks. + if let Some(started) = self.syncing_started { + if started.elapsed() < INITIAL_EVICTION_WAIT_PERIOD { continue } - let last_received_late = - peer.last_notification_received.elapsed() > INACTIVITY_EVICT_THRESHOLD; - let last_sent_late = - peer.last_notification_sent.elapsed() > INACTIVITY_EVICT_THRESHOLD; + self.syncing_started = None; + self.last_notification_io = Instant::now(); + } + + // if syncing hasn't sent or received any blocks within `INACTIVITY_EVICT_THRESHOLD`, + // it means the local node has stalled and is connected to peers who either don't + // consider it connected or are also all stalled. In order to unstall the node, + // disconnect all peers and allow `ProtocolController` to establish new connections. 
+ if self.last_notification_io.elapsed() > INACTIVITY_EVICT_THRESHOLD { + log::debug!(target: "sync", "syncing has halted due to inactivity, evicting all peers"); - if last_received_late && last_sent_late { - log::debug!(target: "sync", "evict peer {id} since it has been idling for too long"); - self.network_service.report_peer(*id, rep::INACTIVE_SUBSTREAM); + for peer in self.peers.keys() { + self.network_service.report_peer(*peer, rep::INACTIVE_SUBSTREAM); self.network_service - .disconnect_peer(*id, self.block_announce_protocol_name.clone()); - self.evicted.insert(*id); + .disconnect_peer(*peer, self.block_announce_protocol_name.clone()); } - } - self.tick_timeout.reset(TICK_TIMEOUT); + // after all the peers have been evicted, start timer again to prevent evicting + // new peers that join after the old peers have been evicted + self.last_notification_io = Instant::now(); + } } while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) { @@ -681,7 +664,7 @@ where .disconnect_peer(peer, self.block_announce_protocol_name.clone()); self.network_service.report_peer( peer, - sc_peerset::ReputationChange::new_fatal("Invalid justification"), + ReputationChange::new_fatal("Invalid justification"), ); } }, @@ -730,8 +713,9 @@ where remote, received_handshake, sink, + inbound, tx, - } => match self.on_sync_peer_connected(remote, &received_handshake, sink) { + } => match self.on_sync_peer_connected(remote, &received_handshake, sink, inbound) { Ok(()) => { let _ = tx.send(true); }, @@ -744,7 +728,6 @@ where }, }, sc_network::SyncEvent::NotificationStreamClosed { remote } => { - self.evicted.remove(&remote); if self.on_sync_peer_disconnected(remote).is_err() { log::trace!( target: "sync", @@ -757,6 +740,7 @@ where for message in messages { if self.peers.contains_key(&remote) { if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) { + self.last_notification_io = Instant::now(); self.push_block_announce_validation(remote, announce); // Make sure that
the newly added block announce validation future @@ -800,15 +784,31 @@ where /// /// Returns a result if the handshake of this peer was indeed accepted. pub fn on_sync_peer_disconnected(&mut self, peer: PeerId) -> Result<(), ()> { - if self.peers.remove(&peer).is_some() { + if let Some(info) = self.peers.remove(&peer) { if self.important_peers.contains(&peer) { log::warn!(target: "sync", "Reserved peer {} disconnected", peer); } else { log::debug!(target: "sync", "{} disconnected", peer); } + if !self.default_peers_set_no_slot_connected_peers.remove(&peer) && + info.inbound && info.info.roles.is_full() + { + match self.num_in_peers.checked_sub(1) { + Some(value) => { + self.num_in_peers = value; + }, + None => { + log::error!( + target: "sync", + "trying to disconnect an inbound node which is not counted as inbound" + ); + debug_assert!(false); + }, + } + } + self.chain_sync.peer_disconnected(&peer); - self.default_peers_set_no_slot_connected_peers.remove(&peer); self.event_streams .retain(|stream| stream.unbounded_send(SyncEvent::PeerDisconnected(peer)).is_ok()); Ok(()) @@ -827,6 +827,7 @@ where who: PeerId, status: &BlockAnnouncesHandshake, sink: NotificationsSink, + inbound: bool, ) -> Result<(), ()> { log::trace!(target: "sync", "New peer {} {:?}", who, status); @@ -869,6 +870,15 @@ where let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&who); let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 }; + // make sure to accept no more than `--in-peers` many full nodes + if !no_slot_peer && + status.roles.is_full() && + inbound && self.num_in_peers == self.max_in_peers + { + log::debug!(target: "sync", "All inbound slots have been consumed, rejecting {who}"); + return Err(()) + } + if status.roles.is_full() && self.chain_sync.num_peers() >= self.default_peers_set_num_full + @@ -897,8 +907,7 @@ where NonZeroUsize::new(MAX_KNOWN_BLOCKS).expect("Constant is nonzero"), ), sink, - last_notification_sent: Instant::now(), - 
last_notification_received: Instant::now(), + inbound, }; let req = if peer.info.roles.is_full() { @@ -916,8 +925,11 @@ where log::debug!(target: "sync", "Connected {}", who); self.peers.insert(who, peer); + if no_slot_peer { self.default_peers_set_no_slot_connected_peers.insert(who); + } else if inbound && status.roles.is_full() { + self.num_in_peers += 1; } if let Some(req) = req { diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 3f1cbebd57255..175c1c43f46f7 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -78,7 +78,7 @@ use sp_consensus::{ }; use sp_runtime::{ traits::{ - Block as BlockT, CheckedSub, Hash, HashFor, Header as HeaderT, NumberFor, One, + Block as BlockT, CheckedSub, Hash, HashingFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, }, EncodedJustification, Justifications, @@ -148,7 +148,7 @@ const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; pub(crate) const MAX_BLOCKS_IN_RESPONSE: usize = 128; mod rep { - use sc_peerset::ReputationChange as Rep; + use sc_network::ReputationChange as Rep; /// Reputation change when a peer sent us a message that led to a /// database read error. 
pub const BLOCKCHAIN_READ_ERROR: Rep = Rep::new(-(1 << 16), "DB Error"); @@ -515,8 +515,12 @@ where phase: WarpSyncPhase::DownloadingBlocks(gap_sync.best_queued_number), total_bytes: 0, }), - (None, SyncMode::Warp, _) => - Some(WarpSyncProgress { phase: WarpSyncPhase::AwaitingPeers, total_bytes: 0 }), + (None, SyncMode::Warp, _) => Some(WarpSyncProgress { + phase: WarpSyncPhase::AwaitingPeers { + required_peers: MIN_PEERS_TO_START_WARP_SYNC, + }, + total_bytes: 0, + }), (Some(sync), _, _) => Some(sync.progress()), _ => None, }; @@ -999,19 +1003,6 @@ where Ok(self.validate_and_queue_blocks(new_blocks, gap)) } - fn process_block_response_data(&mut self, blocks_to_import: Result, BadPeer>) { - match blocks_to_import { - Ok(OnBlockData::Import(origin, blocks)) => self.import_blocks(origin, blocks), - Ok(OnBlockData::Request(peer, req)) => self.send_block_request(peer, req), - Ok(OnBlockData::Continue) => {}, - Err(BadPeer(id, repu)) => { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(id, repu); - }, - } - } - fn on_block_justification( &mut self, who: PeerId, @@ -1495,7 +1486,6 @@ where match self.mode { SyncMode::Full => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, - SyncMode::Light => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, SyncMode::LightState { storage_chain_mode: true, .. } => @@ -1508,7 +1498,6 @@ where fn skip_execution(&self) -> bool { match self.mode { SyncMode::Full => false, - SyncMode::Light => true, SyncMode::LightState { .. 
} => true, SyncMode::Warp => true, } @@ -1755,18 +1744,6 @@ where return PollBlockAnnounceValidation::Nothing { is_best, who, announce } } - let requires_additional_data = self.mode != SyncMode::Light || !known_parent; - if !requires_additional_data { - trace!( - target: "sync", - "Importing new header announced from {}: {} {:?}", - who, - hash, - announce.header, - ); - return PollBlockAnnounceValidation::ImportHeader { is_best, announce, who } - } - if self.status().state == SyncState::Idle { trace!( target: "sync", @@ -3160,7 +3137,7 @@ fn validate_blocks( } if let (Some(header), Some(body)) = (&b.header, &b.body) { let expected = *header.extrinsics_root(); - let got = HashFor::::ordered_trie_root( + let got = HashingFor::::ordered_trie_root( body.iter().map(Encode::encode).collect(), sp_runtime::StateVersion::V0, ); diff --git a/client/network/sync/src/mock.rs b/client/network/sync/src/mock.rs index 5300638ce89b0..838c6cf7667a2 100644 --- a/client/network/sync/src/mock.rs +++ b/client/network/sync/src/mock.rs @@ -59,7 +59,6 @@ mockall::mock! { request: Option>, response: BlockResponse, ) -> Result, BadPeer>; - fn process_block_response_data(&mut self, blocks_to_import: Result, BadPeer>); fn on_block_justification( &mut self, who: PeerId, diff --git a/client/network/sync/src/service/mock.rs b/client/network/sync/src/service/mock.rs index c882633993c8b..885eb1f8da593 100644 --- a/client/network/sync/src/service/mock.rs +++ b/client/network/sync/src/service/mock.rs @@ -25,9 +25,8 @@ use sc_network::{ request_responses::{IfDisconnected, RequestFailure}, types::ProtocolName, NetworkNotification, NetworkPeers, NetworkRequest, NetworkSyncForkRequest, - NotificationSenderError, NotificationSenderT, + NotificationSenderError, NotificationSenderT, ReputationChange, }; -use sc_peerset::ReputationChange; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::collections::HashSet; @@ -100,13 +99,11 @@ mockall::mock! 
{ protocol: ProtocolName, peers: HashSet, ) -> Result<(), String>; - fn remove_peers_from_reserved_set(&self, protocol: ProtocolName, peers: Vec); - fn add_to_peers_set( + fn remove_peers_from_reserved_set( &self, protocol: ProtocolName, - peers: HashSet, + peers: Vec ) -> Result<(), String>; - fn remove_from_peers_set(&self, protocol: ProtocolName, peers: Vec); fn sync_num_connected(&self) -> usize; } diff --git a/client/network/sync/src/service/network.rs b/client/network/sync/src/service/network.rs index f87de1c4c3ecc..12a47d6a9b544 100644 --- a/client/network/sync/src/service/network.rs +++ b/client/network/sync/src/service/network.rs @@ -22,9 +22,8 @@ use libp2p::PeerId; use sc_network::{ request_responses::{IfDisconnected, RequestFailure}, types::ProtocolName, - NetworkNotification, NetworkPeers, NetworkRequest, + NetworkNotification, NetworkPeers, NetworkRequest, ReputationChange, }; -use sc_peerset::ReputationChange; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::sync::Arc; @@ -159,7 +158,7 @@ mod tests { let peer = PeerId::random(); let proto = ProtocolName::from("test-protocol"); let proto_clone = proto.clone(); - let change = sc_peerset::ReputationChange::new_fatal("test-change"); + let change = sc_network::ReputationChange::new_fatal("test-change"); let mut mock_network = MockNetwork::new(); mock_network diff --git a/client/network/sync/src/state.rs b/client/network/sync/src/state.rs index 0fcf17158179e..305f0ee6838a2 100644 --- a/client/network/sync/src/state.rs +++ b/client/network/sync/src/state.rs @@ -150,7 +150,7 @@ where } else { values.key_values }; - let mut entry = self.state.entry(values.state_root).or_default(); + let entry = self.state.entry(values.state_root).or_default(); if entry.0.len() > 0 && entry.1.len() > 1 { // Already imported child_trie with same root. // Warning this will not work with parallel download. 
diff --git a/client/network/sync/src/state_request_handler.rs b/client/network/sync/src/state_request_handler.rs index 93597453aa8a2..ed14b889cbb54 100644 --- a/client/network/sync/src/state_request_handler.rs +++ b/client/network/sync/src/state_request_handler.rs @@ -20,14 +20,11 @@ use crate::schema::v1::{KeyValueStateEntry, StateEntry, StateRequest, StateResponse}; use codec::{Decode, Encode}; -use futures::{ - channel::{mpsc, oneshot}, - stream::StreamExt, -}; +use futures::{channel::oneshot, stream::StreamExt}; use libp2p::PeerId; use log::{debug, trace}; -use lru::LruCache; use prost::Message; +use schnellru::{ByLength, LruMap}; use sc_client_api::{BlockBackend, ProofProvider}; use sc_network::{ @@ -38,7 +35,6 @@ use sp_runtime::traits::Block as BlockT; use std::{ hash::{Hash, Hasher}, - num::NonZeroUsize, sync::Arc, time::Duration, }; @@ -48,7 +44,7 @@ const MAX_RESPONSE_BYTES: usize = 2 * 1024 * 1024; // Actual reponse may be bigg const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2; mod rep { - use sc_peerset::ReputationChange as Rep; + use sc_network::ReputationChange as Rep; /// Reputation change when a peer sent us the same request multiple times. pub const SAME_REQUEST: Rep = Rep::new(i32::MIN, "Same state request multiple times"); @@ -114,11 +110,11 @@ enum SeenRequestsValue { /// Handler for incoming block requests from a remote peer. pub struct StateRequestHandler { client: Arc, - request_receiver: mpsc::Receiver, + request_receiver: async_channel::Receiver, /// Maps from request to number of times we have seen this request. /// /// This is used to check if a peer is spamming us with the same request. - seen_requests: LruCache, SeenRequestsValue>, + seen_requests: LruMap, SeenRequestsValue>, } impl StateRequestHandler @@ -135,7 +131,8 @@ where ) -> (Self, ProtocolConfig) { // Reserve enough request slots for one request per peer when we are at the maximum // number of peers. 
- let (tx, request_receiver) = mpsc::channel(num_peer_hint); + let capacity = std::cmp::max(num_peer_hint, 1); + let (tx, request_receiver) = async_channel::bounded(capacity); let mut protocol_config = generate_protocol_config( protocol_id, @@ -148,9 +145,8 @@ where ); protocol_config.inbound_queue = Some(tx); - let capacity = - NonZeroUsize::new(num_peer_hint.max(1) * 2).expect("cache capacity is not zero"); - let seen_requests = LruCache::new(capacity); + let capacity = ByLength::new(num_peer_hint.max(1) as u32 * 2); + let seen_requests = LruMap::new(capacity); (Self { client, request_receiver, seen_requests }, protocol_config) } @@ -183,7 +179,7 @@ where let mut reputation_changes = Vec::new(); - match self.seen_requests.get_mut(&key) { + match self.seen_requests.get(&key) { Some(SeenRequestsValue::First) => {}, Some(SeenRequestsValue::Fulfilled(ref mut requests)) => { *requests = requests.saturating_add(1); @@ -193,7 +189,7 @@ where } }, None => { - self.seen_requests.put(key.clone(), SeenRequestsValue::First); + self.seen_requests.insert(key.clone(), SeenRequestsValue::First); }, } @@ -250,7 +246,7 @@ where .last() .map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))), ); - if let Some(value) = self.seen_requests.get_mut(&key) { + if let Some(value) = self.seen_requests.get(&key) { // If this is the first time we have processed this request, we need to change // it to `Fulfilled`. if let SeenRequestsValue::First = value { diff --git a/client/network/sync/src/warp_request_handler.rs b/client/network/sync/src/warp_request_handler.rs index 7061d6485d092..a49a65af51d0b 100644 --- a/client/network/sync/src/warp_request_handler.rs +++ b/client/network/sync/src/warp_request_handler.rs @@ -17,10 +17,7 @@ //! Helper for handling (i.e. answering) grandpa warp sync requests from a remote peer. 
use codec::Decode; -use futures::{ - channel::{mpsc, oneshot}, - stream::StreamExt, -}; +use futures::{channel::oneshot, stream::StreamExt}; use log::debug; use sc_network::{ @@ -36,6 +33,9 @@ use std::{sync::Arc, time::Duration}; const MAX_RESPONSE_SIZE: u64 = 16 * 1024 * 1024; +/// Incoming warp requests bounded queue size. +const MAX_WARP_REQUEST_QUEUE: usize = 20; + /// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing /// incoming requests. pub fn generate_request_response_config>( @@ -72,7 +72,7 @@ fn generate_legacy_protocol_name(protocol_id: ProtocolId) -> String { /// Handler for incoming grandpa warp sync requests from a remote peer. pub struct RequestHandler { backend: Arc>, - request_receiver: mpsc::Receiver, + request_receiver: async_channel::Receiver, } impl RequestHandler { @@ -83,7 +83,7 @@ impl RequestHandler { fork_id: Option<&str>, backend: Arc>, ) -> (Self, RequestResponseConfig) { - let (tx, request_receiver) = mpsc::channel(20); + let (tx, request_receiver) = async_channel::bounded(MAX_WARP_REQUEST_QUEUE); let mut request_response_config = generate_request_response_config(protocol_id, genesis_hash, fork_id); diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index af519008dddaf..ddd218051c484 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -32,9 +32,8 @@ sc-network-sync = { version = "0.10.0-dev", path = "../sync" } sc-service = { version = "0.10.0-dev", default-features = false, features = ["test-helpers"], path = "../../service" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } 
-sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/network/test/src/fuzz.rs b/client/network/test/src/fuzz.rs new file mode 100644 index 0000000000000..2e288accd80bc --- /dev/null +++ b/client/network/test/src/fuzz.rs @@ -0,0 +1,418 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Fuzz test emulates network events and peer connection handling by `ProtocolController` +//! and `PeerStore` to discover possible inconsistencies in peer management. 
+ +use futures::prelude::*; +use libp2p::PeerId; +use rand::{ + distributions::{Distribution, Uniform, WeightedIndex}, + seq::IteratorRandom, +}; +use sc_network::{ + peer_store::{PeerStore, PeerStoreProvider}, + protocol_controller::{IncomingIndex, Message, ProtoSetConfig, ProtocolController, SetId}, + ReputationChange, +}; +use sc_utils::mpsc::tracing_unbounded; +use std::collections::{HashMap, HashSet}; + +/// Peer events as observed by `Notifications` / fuzz test. +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +enum Event { + /// Either API requested to disconnect from the peer, or the peer dropped. + Disconnected, + /// Incoming request. + Incoming, + /// Answer from PSM: accept. + PsmAccept, + /// Answer from PSM: reject. + PsmReject, + /// Command from PSM: connect. + PsmConnect, + /// Command from PSM: drop connection. + PsmDrop, +} + +/// Simplified peer state as thought by `Notifications` / fuzz test. +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +enum State { + /// Peer is not connected. + Disconnected, + /// We have an inbound connection, but have not decided yet whether to accept it. + Incoming(IncomingIndex), + /// Peer is connected via an inbound connection. + Inbound, + /// Peer is connected via an outbound connection. + Outbound, +} + +/// Bare simplified state without incoming index. +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +enum BareState { + /// Peer is not connected. + Disconnected, + /// We have an inbound connection, but have not decided yet whether to accept it. + Incoming, + /// Peer is connected via an inbound connection. + Inbound, + /// Peer is connected via an outbound connection. 
+ Outbound, +} + +fn discard_incoming_index(state: State) -> BareState { + match state { + State::Disconnected => BareState::Disconnected, + State::Incoming(_) => BareState::Incoming, + State::Inbound => BareState::Inbound, + State::Outbound => BareState::Outbound, + } +} + +#[tokio::test] +async fn run() { + sp_tracing::try_init_simple(); + + for _ in 0..50 { + test_once().await; + } +} + +async fn test_once() { + // Allowed events that can be received in a specific state. + let allowed_events: HashMap> = [ + ( + BareState::Disconnected, + [Event::Incoming, Event::PsmConnect, Event::PsmDrop /* must be ignored */] + .into_iter() + .collect::>(), + ), + ( + BareState::Incoming, + [Event::PsmAccept, Event::PsmReject].into_iter().collect::>(), + ), + ( + BareState::Inbound, + [Event::Disconnected, Event::PsmDrop, Event::PsmConnect /* must be ignored */] + .into_iter() + .collect::>(), + ), + ( + BareState::Outbound, + [Event::Disconnected, Event::PsmDrop, Event::PsmConnect /* must be ignored */] + .into_iter() + .collect::>(), + ), + ] + .into_iter() + .collect(); + + // PRNG to use. + let mut rng = rand::thread_rng(); + + // Nodes that the peerset knows about. + let mut known_nodes = HashMap::::new(); + // Nodes that we have reserved. Always a subset of `known_nodes`. + let mut reserved_nodes = HashSet::::new(); + + // Bootnodes for `PeerStore` initialization. 
+ let bootnodes = (0..Uniform::new_inclusive(0, 4).sample(&mut rng)) + .map(|_| { + let id = PeerId::random(); + known_nodes.insert(id, State::Disconnected); + id + }) + .collect(); + + let peer_store = PeerStore::new(bootnodes); + let mut peer_store_handle = peer_store.handle(); + + let (to_notifications, mut from_controller) = + tracing_unbounded("test_to_notifications", 10_000); + let (protocol_handle, protocol_controller) = ProtocolController::new( + SetId::from(0), + ProtoSetConfig { + reserved_nodes: { + (0..Uniform::new_inclusive(0, 2).sample(&mut rng)) + .map(|_| { + let id = PeerId::random(); + known_nodes.insert(id, State::Disconnected); + reserved_nodes.insert(id); + id + }) + .collect() + }, + in_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), + out_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), + reserved_only: Uniform::new_inclusive(0, 10).sample(&mut rng) == 0, + }, + to_notifications, + Box::new(peer_store_handle.clone()), + ); + + tokio::spawn(peer_store.run()); + tokio::spawn(protocol_controller.run()); + + // List of nodes the user of `peerset` assumes it's connected to. Always a subset of + // `known_nodes`. + let mut connected_nodes = HashSet::::new(); + // List of nodes the user of `peerset` called `incoming` with and that haven't been + // accepted or rejected yet. + let mut incoming_nodes = HashMap::::new(); + // Next id for incoming connections. + let mut next_incoming_id = IncomingIndex(0); + + // The loop below is effectively synchronous, so for `PeerStore` & `ProtocolController` + // runners, spawned above, to advance, we use `spawn_blocking`. + let _ = tokio::task::spawn_blocking(move || { + // PRNG to use in `spawn_blocking` context. + let mut rng = rand::thread_rng(); + + // Perform a certain number of actions while checking that the state is consistent. If we + // reach the end of the loop, the run has succeeded. 
+ // Note that with the ACKing and event postponing mechanism in `ProtocolController` + // the test time grows quadratically with the number of iterations below. + for _ in 0..2500 { + // Peer we are working with. + let mut current_peer = None; + // Current event for state transition validation. + let mut current_event = None; + // Last peer state for allowed event validation. + let mut last_state = None; + + // Each of these weights corresponds to an action that we may perform. + let action_weights = [150, 90, 90, 30, 30, 1, 1, 4, 4]; + + match WeightedIndex::new(&action_weights).unwrap().sample(&mut rng) { + // If we generate 0, try to grab the next message from `ProtocolController`. + 0 => match from_controller.next().now_or_never() { + Some(Some(Message::Connect { peer_id, .. })) => { + log::info!("PSM: connecting to peer {}", peer_id); + + let state = known_nodes.get_mut(&peer_id).unwrap(); + if matches!(*state, State::Incoming(_)) { + log::info!( + "Awaiting incoming response, ignoring obsolete Connect from PSM for peer {}", + peer_id, + ); + continue + } + + last_state = Some(*state); + + if *state != State::Inbound { + *state = State::Outbound; + } + + if !connected_nodes.insert(peer_id) { + log::info!("Request to connect to an already connected node {peer_id}"); + } + + current_peer = Some(peer_id); + current_event = Some(Event::PsmConnect); + }, + Some(Some(Message::Drop { peer_id, .. 
})) => { + log::info!("PSM: dropping peer {}", peer_id); + + let state = known_nodes.get_mut(&peer_id).unwrap(); + if matches!(*state, State::Incoming(_)) { + log::info!( + "Awaiting incoming response, ignoring obsolete Drop from PSM for peer {}", + peer_id, + ); + continue + } + + last_state = Some(*state); + *state = State::Disconnected; + + if !connected_nodes.remove(&peer_id) { + log::info!("Ignoring attempt to drop a disconnected peer {}", peer_id); + } + + current_peer = Some(peer_id); + current_event = Some(Event::PsmDrop); + }, + Some(Some(Message::Accept(n))) => { + log::info!("PSM: accepting index {}", n.0); + + let peer_id = incoming_nodes.remove(&n).unwrap(); + + let state = known_nodes.get_mut(&peer_id).unwrap(); + match *state { + State::Incoming(incoming_index) => + if n.0 < incoming_index.0 { + log::info!( + "Ignoring obsolete Accept for {:?} while awaiting {:?} for peer {}", + n, incoming_index, peer_id, + ); + continue + } else if n.0 > incoming_index.0 { + panic!( + "Received {:?} while awaiting {:?} for peer {}", + n, incoming_index, peer_id, + ); + }, + _ => {}, + } + + last_state = Some(*state); + *state = State::Inbound; + + assert!(connected_nodes.insert(peer_id)); + + current_peer = Some(peer_id); + current_event = Some(Event::PsmAccept); + }, + Some(Some(Message::Reject(n))) => { + log::info!("PSM: rejecting index {}", n.0); + + let peer_id = incoming_nodes.remove(&n).unwrap(); + + let state = known_nodes.get_mut(&peer_id).unwrap(); + match *state { + State::Incoming(incoming_index) => + if n.0 < incoming_index.0 { + log::info!( + "Ignoring obsolete Reject for {:?} while awaiting {:?} for peer {}", + n, incoming_index, peer_id, + ); + continue + } else if n.0 > incoming_index.0 { + panic!( + "Received {:?} while awaiting {:?} for peer {}", + n, incoming_index, peer_id, + ); + }, + _ => {}, + } + + last_state = Some(*state); + *state = State::Disconnected; + + assert!(!connected_nodes.contains(&peer_id)); + + current_peer = Some(peer_id); + 
current_event = Some(Event::PsmReject); + }, + Some(None) => panic!(), + None => {}, + }, + + // If we generate 1, discover a new node. + 1 => { + let new_id = PeerId::random(); + known_nodes.insert(new_id, State::Disconnected); + peer_store_handle.add_known_peer(new_id); + }, + + // If we generate 2, adjust a random reputation. + 2 => + if let Some(id) = known_nodes.keys().choose(&mut rng) { + let val = Uniform::new_inclusive(i32::MIN, i32::MAX).sample(&mut rng); + peer_store_handle.report_peer(*id, ReputationChange::new(val, "")); + }, + + // If we generate 3, disconnect from a random node. + 3 => + if let Some(id) = connected_nodes.iter().choose(&mut rng).cloned() { + log::info!("Disconnected from {}", id); + connected_nodes.remove(&id); + + let state = known_nodes.get_mut(&id).unwrap(); + last_state = Some(*state); + *state = State::Disconnected; + + protocol_handle.dropped(id); + + current_peer = Some(id); + current_event = Some(Event::Disconnected); + }, + + // If we generate 4, connect to a random node. + 4 => { + if let Some(id) = known_nodes + .keys() + .filter(|n| { + incoming_nodes.values().all(|m| m != *n) && + !connected_nodes.contains(*n) + }) + .choose(&mut rng) + .cloned() + { + log::info!("Incoming connection from {}, index {}", id, next_incoming_id.0); + protocol_handle.incoming_connection(id, next_incoming_id); + incoming_nodes.insert(next_incoming_id, id); + + let state = known_nodes.get_mut(&id).unwrap(); + last_state = Some(*state); + *state = State::Incoming(next_incoming_id); + + next_incoming_id.0 += 1; + + current_peer = Some(id); + current_event = Some(Event::Incoming); + } + }, + + // 5 and 6 are the reserved-only mode. + 5 => { + log::info!("Set reserved only"); + protocol_handle.set_reserved_only(true); + }, + 6 => { + log::info!("Unset reserved only"); + protocol_handle.set_reserved_only(false); + }, + + // 7 and 8 are about switching a random node in or out of reserved mode. 
+ 7 => { + if let Some(id) = + known_nodes.keys().filter(|n| !reserved_nodes.contains(*n)).choose(&mut rng) + { + log::info!("Add reserved: {}", id); + protocol_handle.add_reserved_peer(*id); + reserved_nodes.insert(*id); + } + }, + 8 => + if let Some(id) = reserved_nodes.iter().choose(&mut rng).cloned() { + log::info!("Remove reserved: {}", id); + reserved_nodes.remove(&id); + protocol_handle.remove_reserved_peer(id); + }, + + _ => unreachable!(), + } + + // Validate state transitions. + if let Some(peer_id) = current_peer { + let event = current_event.unwrap(); + let last_state = discard_incoming_index(last_state.unwrap()); + if !allowed_events.get(&last_state).unwrap().contains(&event) { + panic!( + "Invalid state transition: {:?} x {:?} for peer {}", + last_state, event, peer_id, + ); + } + } + } + }) + .await; +} diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index a9ff38e4ea608..2a20da5a556b7 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -20,13 +20,14 @@ #[cfg(test)] mod block_import; #[cfg(test)] +mod fuzz; +#[cfg(test)] mod service; #[cfg(test)] mod sync; use std::{ collections::HashMap, - marker::PhantomData, pin::Pin, sync::Arc, task::{Context as FutureContext, Poll}, @@ -39,7 +40,7 @@ use log::trace; use parking_lot::Mutex; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_client_api::{ - backend::{AuxStore, Backend, Finalizer, TransactionFor}, + backend::{AuxStore, Backend, Finalizer}, BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, FinalityNotifications, ImportNotifications, }; @@ -53,6 +54,7 @@ use sc_network::{ FullNetworkConfiguration, MultiaddrWithPeerId, NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, Role, SyncMode, TransportConfig, }, + peer_store::PeerStore, request_responses::ProtocolConfig as RequestResponseConfig, types::ProtocolName, Multiaddr, NetworkBlock, NetworkService, NetworkStateInfo, 
NetworkSyncForkRequest, @@ -114,8 +116,8 @@ impl PassThroughVerifier { impl Verifier for PassThroughVerifier { async fn verify( &mut self, - mut block: BlockImportParams, - ) -> Result, String> { + mut block: BlockImportParams, + ) -> Result, String> { if block.fork_choice.is_none() { block.fork_choice = Some(ForkChoiceStrategy::LongestChain); }; @@ -207,7 +209,6 @@ impl PeersClient { #[async_trait::async_trait] impl BlockImport for PeersClient { type Error = ConsensusError; - type Transaction = (); async fn check_block( &mut self, @@ -218,9 +219,9 @@ impl BlockImport for PeersClient { async fn import_block( &mut self, - block: BlockImportParams, + block: BlockImportParams, ) -> Result { - self.client.import_block(block.clear_storage_changes_and_mutate()).await + self.client.import_block(block).await } } @@ -245,7 +246,6 @@ pub struct Peer { impl Peer where B: BlockImport + Send + Sync, - B::Transaction: Send, { /// Get this peer ID. pub fn id(&self) -> PeerId { @@ -553,24 +553,12 @@ where } pub trait BlockImportAdapterFull: - BlockImport< - Block, - Transaction = TransactionFor, - Error = ConsensusError, - > + Send - + Sync - + Clone + BlockImport + Send + Sync + Clone { } impl BlockImportAdapterFull for T where - T: BlockImport< - Block, - Transaction = TransactionFor, - Error = ConsensusError, - > + Send - + Sync - + Clone + T: BlockImport + Send + Sync + Clone { } @@ -580,27 +568,23 @@ impl BlockImportAdapterFull for T where /// This is required as the `TestNetFactory` trait does not distinguish between /// full and light nodes. #[derive(Clone)] -pub struct BlockImportAdapter { +pub struct BlockImportAdapter { inner: I, - _phantom: PhantomData, } -impl BlockImportAdapter { +impl BlockImportAdapter { /// Create a new instance of `Self::Full`. 
pub fn new(inner: I) -> Self { - Self { inner, _phantom: PhantomData } + Self { inner } } } #[async_trait::async_trait] -impl BlockImport for BlockImportAdapter +impl BlockImport for BlockImportAdapter where I: BlockImport + Send + Sync, - I::Transaction: Send, - Transaction: Send + 'static, { type Error = ConsensusError; - type Transaction = Transaction; async fn check_block( &mut self, @@ -611,9 +595,9 @@ where async fn import_block( &mut self, - block: BlockImportParams, + block: BlockImportParams, ) -> Result { - self.inner.import_block(block.clear_storage_changes_and_mutate()).await + self.inner.import_block(block).await } } @@ -627,8 +611,8 @@ struct VerifierAdapter { impl Verifier for VerifierAdapter { async fn verify( &mut self, - block: BlockImportParams, - ) -> Result, String> { + block: BlockImportParams, + ) -> Result, String> { let hash = block.header.hash(); self.verifier.lock().await.verify(block).await.map_err(|e| { self.failed_verifications.lock().insert(hash, e.clone()); @@ -711,10 +695,7 @@ pub struct FullPeerConfig { } #[async_trait::async_trait] -pub trait TestNetFactory: Default + Sized + Send -where - >::Transaction: Send, -{ +pub trait TestNetFactory: Default + Sized + Send { type Verifier: 'static + Verifier; type BlockImport: BlockImport + Clone + Send + Sync + 'static; type PeerData: Default + Send; @@ -770,7 +751,7 @@ where *genesis_extra_storage = storage; } - if matches!(config.sync_mode, SyncMode::Fast { .. } | SyncMode::Warp) { + if matches!(config.sync_mode, SyncMode::LightState { .. 
} | SyncMode::Warp) { test_client_builder = test_client_builder.set_no_genesis(); } let backend = test_client_builder.backend(); @@ -915,6 +896,12 @@ where }); } + let peer_store = PeerStore::new( + network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(), + ); + let peer_store_handle = peer_store.handle(); + self.spawn_task(peer_store.run().boxed()); + let genesis_hash = client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed"); let network = NetworkWorker::new(sc_network::config::Params { @@ -923,6 +910,7 @@ where tokio::spawn(f); }), network_config: full_net_config, + peer_store: peer_store_handle, genesis_hash, protocol_id, fork_id, diff --git a/client/network/test/src/service.rs b/client/network/test/src/service.rs index 8c15d6b09ea45..68e780545bb17 100644 --- a/client/network/test/src/service.rs +++ b/client/network/test/src/service.rs @@ -23,6 +23,7 @@ use sc_consensus::{ImportQueue, Link}; use sc_network::{ config::{self, FullNetworkConfiguration, MultiaddrWithPeerId, ProtocolId, TransportConfig}, event::Event, + peer_store::PeerStore, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkService, NetworkStateInfo, NetworkWorker, }; @@ -134,8 +135,8 @@ impl TestNetworkBuilder { impl sc_consensus::Verifier for PassThroughVerifier { async fn verify( &mut self, - mut block: sc_consensus::BlockImportParams, - ) -> Result, String> { + mut block: sc_consensus::BlockImportParams, + ) -> Result, String> { block.finalized = self.0; block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); Ok(block) @@ -220,6 +221,12 @@ impl TestNetworkBuilder { full_net_config.add_request_response_protocol(config); } + let peer_store = PeerStore::new( + network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(), + ); + let peer_store_handle = peer_store.handle(); + tokio::spawn(peer_store.run().boxed()); + let genesis_hash = client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed"); let 
worker = NetworkWorker::< @@ -233,6 +240,7 @@ impl TestNetworkBuilder { }), genesis_hash, network_config: full_net_config, + peer_store: peer_store_handle, protocol_id, fork_id, metrics_registry: None, diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 81707445dc9d3..389177b4aaf1b 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -550,7 +550,10 @@ async fn can_sync_explicit_forks() { .await; } -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +// TODO: for unknown reason, this test is flaky on a multithreaded runtime, so we run it +// in a single-threaded mode. +// See issue https://github.com/paritytech/substrate/issues/14622. +#[tokio::test] async fn syncs_header_only_forks() { sp_tracing::try_init_simple(); let mut net = TestNet::new(0); @@ -1132,7 +1135,7 @@ async fn syncs_state() { let mut config_two = FullPeerConfig::default(); config_two.extra_storage = Some(genesis_storage); config_two.sync_mode = - SyncMode::Fast { skip_proofs: *skip_proofs, storage_chain_mode: false }; + SyncMode::LightState { skip_proofs: *skip_proofs, storage_chain_mode: false }; net.add_full_peer_with_config(config_two); let hashes = net.peer(0).push_blocks(64, false); // Wait for peer 1 to sync header chain. 
@@ -1175,7 +1178,7 @@ async fn syncs_indexed_blocks() { net.add_full_peer_with_config(FullPeerConfig { storage_chain: true, ..Default::default() }); net.add_full_peer_with_config(FullPeerConfig { storage_chain: true, - sync_mode: SyncMode::Fast { skip_proofs: false, storage_chain_mode: true }, + sync_mode: SyncMode::LightState { skip_proofs: false, storage_chain_mode: true }, ..Default::default() }); net.peer(0).generate_blocks_at( diff --git a/client/network/transactions/Cargo.toml b/client/network/transactions/Cargo.toml index 3ae1dc5908df4..ce62c89e8a55a 100644 --- a/client/network/transactions/Cargo.toml +++ b/client/network/transactions/Cargo.toml @@ -13,16 +13,14 @@ documentation = "https://docs.rs/sc-network-transactions" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +array-bytes = "6.1" +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" libp2p = "0.51.3" log = "0.4.17" -pin-project = "1.0.12" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } sc-network = { version = "0.10.0-dev", path = "../" } sc-network-common = { version = "0.10.0-dev", path = "../common" } -sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } sc-utils = { version = "4.0.0-dev", path = "../../utils" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } diff --git a/client/network/transactions/src/lib.rs b/client/network/transactions/src/lib.rs index f57556d3986b0..b46733d427230 100644 --- a/client/network/transactions/src/lib.rs +++ b/client/network/transactions/src/lib.rs @@ -65,7 +65,7 @@ pub mod config; pub type Transactions = Vec; mod rep { - use 
sc_peerset::ReputationChange as Rep; + use sc_network::ReputationChange as Rep; /// Reputation change when a peer sends us any transaction. /// /// This forces node to verify it, thus the negative value here. Once transaction is verified, @@ -97,21 +97,19 @@ impl Metrics { } } -#[pin_project::pin_project] struct PendingTransaction { - #[pin] validation: TransactionImportFuture, tx_hash: H, } +impl Unpin for PendingTransaction {} + impl Future for PendingTransaction { type Output = (H, TransactionImport); - fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { - let mut this = self.project(); - - if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { - return Poll::Ready((this.tx_hash.clone(), import_result)) + fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + if let Poll::Ready(import_result) = self.validation.poll_unpin(cx) { + return Poll::Ready((self.tx_hash.clone(), import_result)) } Poll::Pending @@ -340,10 +338,13 @@ where } }, SyncEvent::PeerDisconnected(remote) => { - self.network.remove_peers_from_reserved_set( + let result = self.network.remove_peers_from_reserved_set( self.protocol_name.clone(), iter::once(remote).collect(), ); + if let Err(err) = result { + log::error!(target: "sync", "Remove reserved peer failed: {}", err); + } }, } } diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index a2ab54ba5e638..f52b0aa2878f6 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -13,14 +13,14 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" +array-bytes = "6.1" bytes = "1.1" -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } fnv = "1.0.6" futures = "0.3.21" futures-timer = "3.0.2" hyper = { version = "0.14.16", features = ["stream", "http2"] } -hyper-rustls = { version = 
"0.23.0", features = ["http2"] } +hyper-rustls = { version = "0.24.0", features = ["http2"] } libp2p = "0.51.3" num_cpus = "1.13" once_cell = "1.8" @@ -31,12 +31,15 @@ tracing = "0.1.29" sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-network = { version = "0.10.0-dev", path = "../network" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } -sc-peerset = { version = "4.0.0-dev", path = "../peerset" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } sp-offchain = { version = "4.0.0-dev", path = "../../primitives/offchain" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } +sp-keystore = { version = "0.27.0", path = "../../primitives/keystore" } +sp-externalities = { version = "0.19.0", path = "../../primitives/externalities" } +log = "0.4.17" [dev-dependencies] lazy_static = "1.4.0" @@ -46,7 +49,7 @@ sc-client-db = { version = "0.10.0-dev", default-features = true, path = "../db" sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } [features] diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index a15f03bab6f84..c7df5784d329e 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ 
-25,8 +25,8 @@ pub use http::SharedClient; use libp2p::{Multiaddr, PeerId}; use sp_core::{ offchain::{ - self, HttpError, HttpRequestId, HttpRequestStatus, OffchainStorage, OpaqueMultiaddr, - OpaqueNetworkState, StorageKind, Timestamp, + self, HttpError, HttpRequestId, HttpRequestStatus, OpaqueMultiaddr, OpaqueNetworkState, + Timestamp, }, OpaquePeerId, }; @@ -36,110 +36,6 @@ mod http; mod timestamp; -fn unavailable_yet(name: &str) -> R { - tracing::error!( - target: super::LOG_TARGET, - "The {:?} API is not available for offchain workers yet. Follow \ - https://github.com/paritytech/substrate/issues/1458 for details", - name - ); - Default::default() -} - -const LOCAL_DB: &str = "LOCAL (fork-aware) DB"; - -/// Offchain DB reference. -#[derive(Debug, Clone)] -pub struct Db { - /// Persistent storage database. - persistent: Storage, -} - -impl Db { - /// Create new instance of Offchain DB. - pub fn new(persistent: Storage) -> Self { - Self { persistent } - } - - /// Create new instance of Offchain DB, backed by given backend. 
- pub fn factory_from_backend( - backend: &Backend, - ) -> Option> - where - Backend: sc_client_api::Backend, - Block: sp_runtime::traits::Block, - Storage: 'static, - { - sc_client_api::Backend::offchain_storage(backend).map(|db| Box::new(Self::new(db)) as _) - } -} - -impl offchain::DbExternalities for Db { - fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { - tracing::debug!( - target: "offchain-worker::storage", - ?kind, - key = ?array_bytes::bytes2hex("", key), - value = ?array_bytes::bytes2hex("", value), - "Write", - ); - match kind { - StorageKind::PERSISTENT => self.persistent.set(STORAGE_PREFIX, key, value), - StorageKind::LOCAL => unavailable_yet(LOCAL_DB), - } - } - - fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { - tracing::debug!( - target: "offchain-worker::storage", - ?kind, - key = ?array_bytes::bytes2hex("", key), - "Clear", - ); - match kind { - StorageKind::PERSISTENT => self.persistent.remove(STORAGE_PREFIX, key), - StorageKind::LOCAL => unavailable_yet(LOCAL_DB), - } - } - - fn local_storage_compare_and_set( - &mut self, - kind: StorageKind, - key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8], - ) -> bool { - tracing::debug!( - target: "offchain-worker::storage", - ?kind, - key = ?array_bytes::bytes2hex("", key), - new_value = ?array_bytes::bytes2hex("", new_value), - old_value = ?old_value.as_ref().map(|s| array_bytes::bytes2hex("", s)), - "CAS", - ); - match kind { - StorageKind::PERSISTENT => - self.persistent.compare_and_set(STORAGE_PREFIX, key, old_value, new_value), - StorageKind::LOCAL => unavailable_yet(LOCAL_DB), - } - } - - fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { - let result = match kind { - StorageKind::PERSISTENT => self.persistent.get(STORAGE_PREFIX, key), - StorageKind::LOCAL => unavailable_yet(LOCAL_DB), - }; - tracing::debug!( - target: "offchain-worker::storage", - ?kind, - key = ?array_bytes::bytes2hex("", key), - result = 
?result.as_ref().map(|s| array_bytes::bytes2hex("", s)), - "Read", - ); - result - } -} - /// Asynchronous offchain API. /// /// NOTE this is done to prevent recursive calls into the runtime @@ -324,13 +220,12 @@ impl AsyncApi { #[cfg(test)] mod tests { use super::*; - use libp2p::PeerId; use sc_client_db::offchain::LocalStorage; use sc_network::{ config::MultiaddrWithPeerId, types::ProtocolName, NetworkPeers, NetworkStateInfo, + ReputationChange, }; - use sc_peerset::ReputationChange; - use sp_core::offchain::{DbExternalities, Externalities}; + use sp_core::offchain::{storage::OffchainDb, DbExternalities, Externalities, StorageKind}; use std::time::SystemTime; pub(super) struct TestNetwork(); @@ -388,22 +283,14 @@ mod tests { unimplemented!(); } - fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); - } - - fn add_to_peers_set( + fn remove_peers_from_reserved_set( &self, _protocol: ProtocolName, - _peers: HashSet, + _peers: Vec, ) -> Result<(), String> { unimplemented!(); } - fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); - } - fn sync_num_connected(&self) -> usize { unimplemented!(); } @@ -431,8 +318,8 @@ mod tests { AsyncApi::new(mock, false, shared_client) } - fn offchain_db() -> Db { - Db::new(LocalStorage::new_test()) + fn offchain_db() -> OffchainDb { + OffchainDb::new(LocalStorage::new_test()) } #[test] diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs index e3872614eae4d..7ca5e3fd13af7 100644 --- a/client/offchain/src/api/http.rs +++ b/client/offchain/src/api/http.rs @@ -162,7 +162,7 @@ impl HttpApi { self.requests .insert(new_id, HttpApiRequest::NotDispatched(request, body_sender)); - tracing::error!( + tracing::trace!( target: LOG_TARGET, id = %new_id.0, %method, diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index f46fb637a92d3..a11ac7d86ecb8 100644 --- a/client/offchain/src/lib.rs +++ 
b/client/offchain/src/lib.rs @@ -35,22 +35,26 @@ #![warn(missing_docs)] -use std::{fmt, marker::PhantomData, sync::Arc}; +use std::{fmt, sync::Arc}; use futures::{ future::{ready, Future}, prelude::*, }; use parking_lot::Mutex; +use sc_client_api::BlockchainEvents; use sc_network::{NetworkPeers, NetworkStateInfo}; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_core::{offchain, traits::SpawnNamed, ExecutionContext}; +use sp_core::{offchain, traits::SpawnNamed}; +use sp_externalities::Extension; +use sp_keystore::{KeystoreExt, KeystorePtr}; use sp_runtime::traits::{self, Header}; use threadpool::ThreadPool; mod api; -pub use api::Db as OffchainDb; +pub use sp_core::offchain::storage::OffchainDb; pub use sp_offchain::{OffchainWorkerApi, STORAGE_PREFIX}; const LOG_TARGET: &str = "offchain-worker"; @@ -61,65 +65,160 @@ pub trait NetworkProvider: NetworkStateInfo + NetworkPeers {} impl NetworkProvider for T where T: NetworkStateInfo + NetworkPeers {} +/// Special type that implements [`OffchainStorage`](offchain::OffchainStorage). +/// +/// This type can not be constructed and should only be used when passing `None` as `offchain_db` to +/// [`OffchainWorkerOptions`] to make the compiler happy. 
+#[derive(Clone)] +pub enum NoOffchainStorage {} + +impl offchain::OffchainStorage for NoOffchainStorage { + fn set(&mut self, _: &[u8], _: &[u8], _: &[u8]) { + unimplemented!("`NoOffchainStorage` can not be constructed!") + } + + fn remove(&mut self, _: &[u8], _: &[u8]) { + unimplemented!("`NoOffchainStorage` can not be constructed!") + } + + fn get(&self, _: &[u8], _: &[u8]) -> Option> { + unimplemented!("`NoOffchainStorage` can not be constructed!") + } + + fn compare_and_set(&mut self, _: &[u8], _: &[u8], _: Option<&[u8]>, _: &[u8]) -> bool { + unimplemented!("`NoOffchainStorage` can not be constructed!") + } +} + /// Options for [`OffchainWorkers`] -pub struct OffchainWorkerOptions { +pub struct OffchainWorkerOptions { + /// Provides access to the runtime api. + pub runtime_api_provider: Arc, + /// Provides access to the keystore. + pub keystore: Option, + /// Provides access to the offchain database. + /// + /// Use [`NoOffchainStorage`] as type when passing `None` to have some type that works. + pub offchain_db: Option, + /// Provides access to the transaction pool. + pub transaction_pool: Option>, + /// Provides access to network information. + pub network_provider: Arc, + /// Is the node running as validator? + pub is_validator: bool, /// Enable http requests from offchain workers? /// /// If not enabled, any http request will panic. pub enable_http_requests: bool, + /// Callback to create custom [`Extension`]s that should be registered for the + /// `offchain_worker` runtime call. + /// + /// These [`Extension`]s are registered along-side the default extensions and are accessible in + /// the host functions. + /// + /// # Example: + /// + /// ```nocompile + /// custom_extensions: |block_hash| { + /// vec![MyCustomExtension::new()] + /// } + /// ``` + pub custom_extensions: CE, } /// An offchain workers manager. 
-pub struct OffchainWorkers { - client: Arc, - _block: PhantomData, +pub struct OffchainWorkers { + runtime_api_provider: Arc, thread_pool: Mutex, shared_http_client: api::SharedClient, - enable_http: bool, + enable_http_requests: bool, + keystore: Option, + offchain_db: Option>, + transaction_pool: Option>, + network_provider: Arc, + is_validator: bool, + custom_extensions: Box Vec> + Send>, } -impl OffchainWorkers { +impl OffchainWorkers { /// Creates new [`OffchainWorkers`]. - pub fn new(client: Arc) -> Self { - Self::new_with_options(client, OffchainWorkerOptions { enable_http_requests: true }) - } - - /// Creates new [`OffchainWorkers`] using the given `options`. - pub fn new_with_options(client: Arc, options: OffchainWorkerOptions) -> Self { + pub fn new Vec> + Send + 'static>( + OffchainWorkerOptions { + runtime_api_provider, + keystore, + offchain_db, + transaction_pool, + network_provider, + is_validator, + enable_http_requests, + custom_extensions, + }: OffchainWorkerOptions, + ) -> Self { Self { - client, - _block: PhantomData, + runtime_api_provider, thread_pool: Mutex::new(ThreadPool::with_name( "offchain-worker".into(), num_cpus::get(), )), shared_http_client: api::SharedClient::new(), - enable_http: options.enable_http_requests, + enable_http_requests, + keystore, + offchain_db: offchain_db.map(OffchainDb::new), + transaction_pool, + is_validator, + network_provider, + custom_extensions: Box::new(custom_extensions), } } } -impl fmt::Debug for OffchainWorkers { +impl fmt::Debug + for OffchainWorkers +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple("OffchainWorkers").finish() } } -impl OffchainWorkers +impl OffchainWorkers where Block: traits::Block, - Client: ProvideRuntimeApi + Send + Sync + 'static, - Client::Api: OffchainWorkerApi, + RA: ProvideRuntimeApi + Send + Sync + 'static, + RA::Api: OffchainWorkerApi, + Storage: offchain::OffchainStorage + 'static, { + /// Run the offchain workers on every block import. 
+ pub async fn run>( + self, + import_events: Arc, + spawner: impl SpawnNamed, + ) { + import_events + .import_notification_stream() + .for_each(move |n| { + if n.is_new_best { + spawner.spawn( + "offchain-on-block", + Some("offchain-worker"), + self.on_block_imported(&n.header).boxed(), + ); + } else { + tracing::debug!( + target: LOG_TARGET, + "Skipping offchain workers for non-canon block: {:?}", + n.header, + ) + } + + ready(()) + }) + .await; + } + /// Start the offchain workers after given block. #[must_use] - pub fn on_block_imported( - &self, - header: &Block::Header, - network_provider: Arc, - is_validator: bool, - ) -> impl Future { - let runtime = self.client.runtime_api(); + fn on_block_imported(&self, header: &Block::Header) -> impl Future { + let runtime = self.runtime_api_provider.runtime_api(); let hash = header.hash(); let has_api_v1 = runtime.has_api_with::, _>(hash, |v| v == 1); let has_api_v2 = runtime.has_api_with::, _>(hash, |v| v == 2); @@ -140,36 +239,59 @@ where }; tracing::debug!( target: LOG_TARGET, - "Checking offchain workers at {:?}: version:{}", - hash, - version + "Checking offchain workers at {hash:?}: version: {version}", ); + let process = (version > 0).then(|| { - let (api, runner) = - api::AsyncApi::new(network_provider, is_validator, self.shared_http_client.clone()); - tracing::debug!(target: LOG_TARGET, "Spawning offchain workers at {:?}", hash); + let (api, runner) = api::AsyncApi::new( + self.network_provider.clone(), + self.is_validator, + self.shared_http_client.clone(), + ); + tracing::debug!(target: LOG_TARGET, "Spawning offchain workers at {hash:?}"); let header = header.clone(); - let client = self.client.clone(); + let client = self.runtime_api_provider.clone(); let mut capabilities = offchain::Capabilities::all(); + capabilities.set(offchain::Capabilities::HTTP, self.enable_http_requests); + + let keystore = self.keystore.clone(); + let db = self.offchain_db.clone(); + let tx_pool = self.transaction_pool.clone(); + 
let custom_extensions = (*self.custom_extensions)(hash); - capabilities.set(offchain::Capabilities::HTTP, self.enable_http); self.spawn_worker(move || { - let runtime = client.runtime_api(); + let mut runtime = client.runtime_api(); let api = Box::new(api); - tracing::debug!(target: LOG_TARGET, "Running offchain workers at {:?}", hash); + tracing::debug!(target: LOG_TARGET, "Running offchain workers at {hash:?}"); + + if let Some(keystore) = keystore { + runtime.register_extension(KeystoreExt(keystore.clone())); + } + + if let Some(pool) = tx_pool { + runtime.register_extension(pool.offchain_transaction_pool(hash)); + } + + if let Some(offchain_db) = db { + runtime.register_extension(offchain::OffchainDbExt::new( + offchain::LimitedExternalities::new(capabilities, offchain_db.clone()), + )); + } + + runtime.register_extension(offchain::OffchainWorkerExt::new( + offchain::LimitedExternalities::new(capabilities, api), + )); + + custom_extensions.into_iter().for_each(|ext| runtime.register_extension(ext)); - let context = ExecutionContext::OffchainCall(Some((api, capabilities))); let run = if version == 2 { - runtime.offchain_worker_with_context(hash, context, &header) + runtime.offchain_worker(hash, &header) } else { #[allow(deprecated)] - runtime.offchain_worker_before_version_2_with_context( - hash, - context, - *header.number(), - ) + runtime.offchain_worker_before_version_2(hash, *header.number()) }; + if let Err(e) = run { tracing::error!( target: LOG_TARGET, @@ -201,44 +323,6 @@ where } } -/// Inform the offchain worker about new imported blocks -pub async fn notification_future( - is_validator: bool, - client: Arc, - offchain: Arc>, - spawner: Spawner, - network_provider: Arc, -) where - Block: traits::Block, - Client: - ProvideRuntimeApi + sc_client_api::BlockchainEvents + Send + Sync + 'static, - Client::Api: OffchainWorkerApi, - Spawner: SpawnNamed, -{ - client - .import_notification_stream() - .for_each(move |n| { - if n.is_new_best { - spawner.spawn( - 
"offchain-on-block", - Some("offchain-worker"), - offchain - .on_block_imported(&n.header, network_provider.clone(), is_validator) - .boxed(), - ); - } else { - tracing::debug!( - target: LOG_TARGET, - "Skipping offchain workers for non-canon block: {:?}", - n.header, - ) - } - - ready(()) - }) - .await; -} - #[cfg(test)] mod tests { use super::*; @@ -246,18 +330,16 @@ mod tests { use libp2p::{Multiaddr, PeerId}; use sc_block_builder::BlockBuilderProvider as _; use sc_client_api::Backend as _; - use sc_network::{config::MultiaddrWithPeerId, types::ProtocolName}; - use sc_peerset::ReputationChange; - use sc_transaction_pool::{BasicPool, FullChainApi}; + use sc_network::{config::MultiaddrWithPeerId, types::ProtocolName, ReputationChange}; + use sc_transaction_pool::BasicPool; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_consensus::BlockOrigin; - use sp_runtime::generic::BlockId; use std::{collections::HashSet, sync::Arc}; use substrate_test_runtime_client::{ runtime::{ - substrate_test_pallet::pallet::Call as PalletCall, Block, ExtrinsicBuilder, RuntimeCall, + substrate_test_pallet::pallet::Call as PalletCall, ExtrinsicBuilder, RuntimeCall, }, - ClientBlockImportExt, DefaultTestClientBuilderExt, TestClient, TestClientBuilderExt, + ClientBlockImportExt, DefaultTestClientBuilderExt, TestClientBuilderExt, }; struct TestNetwork(); @@ -329,66 +411,47 @@ mod tests { unimplemented!(); } - fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); - } - - fn add_to_peers_set( + fn remove_peers_from_reserved_set( &self, _protocol: ProtocolName, - _peers: HashSet, + _peers: Vec, ) -> Result<(), String> { unimplemented!(); } - fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); - } - fn sync_num_connected(&self) -> usize { unimplemented!(); } } - struct TestPool(Arc, Block>>); - - impl sc_transaction_pool_api::OffchainSubmitTransaction for TestPool { - fn submit_at( - 
&self, - at: &BlockId, - extrinsic: ::Extrinsic, - ) -> Result<(), ()> { - let source = sc_transaction_pool_api::TransactionSource::Local; - futures::executor::block_on(self.0.submit_one(&at, source, extrinsic)) - .map(|_| ()) - .map_err(|_| ()) - } - } - #[test] fn should_call_into_runtime_and_produce_extrinsic() { sp_tracing::try_init_simple(); let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = TestPool(BasicPool::new_full( - Default::default(), - true.into(), - None, - spawner, - client.clone(), - )); + let pool = + BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); let network = Arc::new(TestNetwork()); let header = client.header(client.chain_info().genesis_hash).unwrap().unwrap(); // when - let offchain = OffchainWorkers::new(client); - futures::executor::block_on(offchain.on_block_imported(&header, network, false)); + let offchain = OffchainWorkers::new(OffchainWorkerOptions { + runtime_api_provider: client, + keystore: None, + offchain_db: None::, + transaction_pool: Some(OffchainTransactionPoolFactory::new(pool.clone())), + network_provider: network, + is_validator: false, + enable_http_requests: false, + custom_extensions: |_| Vec::new(), + }); + futures::executor::block_on(offchain.on_block_imported(&header)); // then - assert_eq!(pool.0.status().ready, 1); + assert_eq!(pool.status().ready, 1); assert!(matches!( - pool.0.ready().next().unwrap().data().function, + pool.ready().next().unwrap().data().function, RuntimeCall::SubstrateTest(PalletCall::storage_change { .. 
}) )); } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml deleted file mode 100644 index 043f8a8352caa..0000000000000 --- a/client/peerset/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -description = "Connectivity manager based on reputation" -homepage = "https://substrate.io" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -name = "sc-peerset" -version = "4.0.0-dev" -authors = ["Parity Technologies "] -edition = "2021" -repository = "https://github.com/paritytech/substrate/" -documentation = "https://docs.rs/sc-peerset" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -futures = "0.3.21" -libp2p-identity = { version = "0.1.2", features = ["peerid", "ed25519"] } -log = "0.4.17" -serde_json = "1.0.85" -wasm-timer = "0.2" -sc-utils = { version = "4.0.0-dev", path = "../utils" } - -[dev-dependencies] -rand = "0.8.5" diff --git a/client/peerset/README.md b/client/peerset/README.md deleted file mode 100644 index 1b54c52001caf..0000000000000 --- a/client/peerset/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Peer Set Manager (PSM). Contains the strategy for choosing which nodes the network should be -connected to. - -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs deleted file mode 100644 index e169be8e8ed5b..0000000000000 --- a/client/peerset/src/lib.rs +++ /dev/null @@ -1,999 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Peer Set Manager (PSM). Contains the strategy for choosing which nodes the network should be -//! connected to. -//! -//! The PSM handles *sets* of nodes. A set of nodes is defined as the nodes that are believed to -//! support a certain capability, such as handling blocks and transactions of a specific chain, -//! or collating a certain parachain. -//! -//! For each node in each set, the peerset holds a flag specifying whether the node is -//! connected to us or not. -//! -//! This connected/disconnected status is specific to the node and set combination, and it is for -//! example possible for a node to be connected through a specific set but not another. -//! -//! In addition, for each, set, the peerset also holds a list of reserved nodes towards which it -//! will at all time try to maintain a connection with. - -mod peersstate; - -use futures::{channel::oneshot, prelude::*}; -use log::{debug, error, trace}; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use serde_json::json; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - pin::Pin, - task::{Context, Poll}, - time::{Duration, Instant}, -}; -use wasm_timer::Delay; - -pub use libp2p_identity::PeerId; - -/// We don't accept nodes whose reputation is under this value. -pub const BANNED_THRESHOLD: i32 = 82 * (i32::MIN / 100); -/// Reputation change for a node when we get disconnected from it. -const DISCONNECT_REPUTATION_CHANGE: i32 = -256; -/// Amount of time between the moment we disconnect from a node and the moment we remove it from -/// the list. 
-const FORGET_AFTER: Duration = Duration::from_secs(3600); - -#[derive(Debug)] -enum Action { - AddReservedPeer(SetId, PeerId), - RemoveReservedPeer(SetId, PeerId), - SetReservedPeers(SetId, HashSet), - SetReservedOnly(SetId, bool), - ReportPeer(PeerId, ReputationChange), - AddToPeersSet(SetId, PeerId), - RemoveFromPeersSet(SetId, PeerId), - PeerReputation(PeerId, oneshot::Sender), -} - -/// Identifier of a set in the peerset. -/// -/// Can be constructed using the `From` trait implementation based on the index of the set -/// within [`PeersetConfig::sets`]. For example, the first element of [`PeersetConfig::sets`] is -/// later referred to with `SetId::from(0)`. It is intended that the code responsible for building -/// the [`PeersetConfig`] is also responsible for constructing the [`SetId`]s. -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct SetId(usize); - -impl SetId { - pub const fn from(id: usize) -> Self { - Self(id) - } -} - -impl From for SetId { - fn from(id: usize) -> Self { - Self(id) - } -} - -impl From for usize { - fn from(id: SetId) -> Self { - id.0 - } -} - -/// Description of a reputation adjustment for a node. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct ReputationChange { - /// Reputation delta. - pub value: i32, - /// Reason for reputation change. - pub reason: &'static str, -} - -impl ReputationChange { - /// New reputation change with given delta and reason. - pub const fn new(value: i32, reason: &'static str) -> ReputationChange { - Self { value, reason } - } - - /// New reputation change that forces minimum possible reputation. - pub const fn new_fatal(reason: &'static str) -> ReputationChange { - Self { value: i32::MIN, reason } - } -} - -/// Shared handle to the peer set manager (PSM). Distributed around the code. -#[derive(Debug, Clone)] -pub struct PeersetHandle { - tx: TracingUnboundedSender, -} - -impl PeersetHandle { - /// Adds a new reserved peer. 
The peerset will make an effort to always remain connected to - /// this peer. - /// - /// Has no effect if the node was already a reserved peer. - /// - /// > **Note**: Keep in mind that the networking has to know an address for this node, - /// > otherwise it will not be able to connect to it. - pub fn add_reserved_peer(&self, set_id: SetId, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::AddReservedPeer(set_id, peer_id)); - } - - /// Remove a previously-added reserved peer. - /// - /// Has no effect if the node was not a reserved peer. - pub fn remove_reserved_peer(&self, set_id: SetId, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::RemoveReservedPeer(set_id, peer_id)); - } - - /// Sets whether or not the peerset only has connections with nodes marked as reserved for - /// the given set. - pub fn set_reserved_only(&self, set_id: SetId, reserved: bool) { - let _ = self.tx.unbounded_send(Action::SetReservedOnly(set_id, reserved)); - } - - /// Set reserved peers to the new set. - pub fn set_reserved_peers(&self, set_id: SetId, peer_ids: HashSet) { - let _ = self.tx.unbounded_send(Action::SetReservedPeers(set_id, peer_ids)); - } - - /// Reports an adjustment to the reputation of the given peer. - pub fn report_peer(&self, peer_id: PeerId, score_diff: ReputationChange) { - let _ = self.tx.unbounded_send(Action::ReportPeer(peer_id, score_diff)); - } - - /// Add a peer to a set. - pub fn add_to_peers_set(&self, set_id: SetId, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::AddToPeersSet(set_id, peer_id)); - } - - /// Remove a peer from a set. - pub fn remove_from_peers_set(&self, set_id: SetId, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::RemoveFromPeersSet(set_id, peer_id)); - } - - /// Returns the reputation value of the peer. 
- pub async fn peer_reputation(self, peer_id: PeerId) -> Result { - let (tx, rx) = oneshot::channel(); - - let _ = self.tx.unbounded_send(Action::PeerReputation(peer_id, tx)); - - // The channel can only be closed if the peerset no longer exists. - rx.await.map_err(|_| ()) - } -} - -/// Message that can be sent by the peer set manager (PSM). -#[derive(Debug, PartialEq)] -pub enum Message { - /// Request to open a connection to the given peer. From the point of view of the PSM, we are - /// immediately connected. - Connect { - set_id: SetId, - /// Peer to connect to. - peer_id: PeerId, - }, - - /// Drop the connection to the given peer, or cancel the connection attempt after a `Connect`. - Drop { - set_id: SetId, - /// Peer to disconnect from. - peer_id: PeerId, - }, - - /// Equivalent to `Connect` for the peer corresponding to this incoming index. - Accept(IncomingIndex), - - /// Equivalent to `Drop` for the peer corresponding to this incoming index. - Reject(IncomingIndex), -} - -/// Opaque identifier for an incoming connection. Allocated by the network. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct IncomingIndex(pub u64); - -impl From for IncomingIndex { - fn from(val: u64) -> Self { - Self(val) - } -} - -/// Configuration to pass when creating the peer set manager. -#[derive(Debug)] -pub struct PeersetConfig { - /// List of sets of nodes the peerset manages. - pub sets: Vec, -} - -/// Configuration for a single set of nodes. -#[derive(Debug)] -pub struct SetConfig { - /// Maximum number of ingoing links to peers. - pub in_peers: u32, - - /// Maximum number of outgoing links to peers. - pub out_peers: u32, - - /// List of bootstrap nodes to initialize the set with. - /// - /// > **Note**: Keep in mind that the networking has to know an address for these nodes, - /// > otherwise it will not be able to connect to them. - pub bootnodes: Vec, - - /// Lists of nodes we should always be connected to. 
- /// - /// > **Note**: Keep in mind that the networking has to know an address for these nodes, - /// > otherwise it will not be able to connect to them. - pub reserved_nodes: HashSet, - - /// If true, we only accept nodes in [`SetConfig::reserved_nodes`]. - pub reserved_only: bool, -} - -/// Side of the peer set manager owned by the network. In other words, the "receiving" side. -/// -/// Implements the `Stream` trait and can be polled for messages. The `Stream` never ends and never -/// errors. -#[derive(Debug)] -pub struct Peerset { - /// Underlying data structure for the nodes's states. - data: peersstate::PeersState, - /// For each set, lists of nodes that don't occupy slots and that we should try to always be - /// connected to, and whether only reserved nodes are accepted. Is kept in sync with the list - /// of non-slot-occupying nodes in [`Peerset::data`]. - reserved_nodes: Vec<(HashSet, bool)>, - /// Receiver for messages from the `PeersetHandle` and from `tx`. - rx: TracingUnboundedReceiver, - /// Sending side of `rx`. - tx: TracingUnboundedSender, - /// Queue of messages to be emitted when the `Peerset` is polled. - message_queue: VecDeque, - /// When the `Peerset` was created. - created: Instant, - /// Last time when we updated the reputations of connected nodes. - latest_time_update: Instant, - /// Next time to do a periodic call to `alloc_slots` with all sets. This is done once per - /// second, to match the period of the reputation updates. - next_periodic_alloc_slots: Delay, -} - -impl Peerset { - /// Builds a new peerset from the given configuration. 
- pub fn from_config(config: PeersetConfig) -> (Self, PeersetHandle) { - let (tx, rx) = tracing_unbounded("mpsc_peerset_messages", 10_000); - - let handle = PeersetHandle { tx: tx.clone() }; - - let mut peerset = { - let now = Instant::now(); - - Self { - data: peersstate::PeersState::new(config.sets.iter().map(|set| { - peersstate::SetConfig { in_peers: set.in_peers, out_peers: set.out_peers } - })), - tx, - rx, - reserved_nodes: config - .sets - .iter() - .map(|set| (set.reserved_nodes.clone(), set.reserved_only)) - .collect(), - message_queue: VecDeque::new(), - created: now, - latest_time_update: now, - next_periodic_alloc_slots: Delay::new(Duration::new(0, 0)), - } - }; - - for (set, set_config) in config.sets.into_iter().enumerate() { - for node in set_config.reserved_nodes { - peerset.data.add_no_slot_node(set, node); - } - - for peer_id in set_config.bootnodes { - if let peersstate::Peer::Unknown(entry) = peerset.data.peer(set, &peer_id) { - entry.discover(); - } else { - debug!(target: "peerset", "Duplicate bootnode in config: {:?}", peer_id); - } - } - } - - for set_index in 0..peerset.data.num_sets() { - peerset.alloc_slots(SetId(set_index)); - } - - (peerset, handle) - } - - fn on_add_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { - let newly_inserted = self.reserved_nodes[set_id.0].0.insert(peer_id); - if !newly_inserted { - return - } - - self.data.add_no_slot_node(set_id.0, peer_id); - self.alloc_slots(set_id); - } - - fn on_remove_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { - if !self.reserved_nodes[set_id.0].0.remove(&peer_id) { - return - } - - self.data.remove_no_slot_node(set_id.0, &peer_id); - - // Nothing more to do if not in reserved-only mode. - if !self.reserved_nodes[set_id.0].1 { - return - } - - // If, however, the peerset is in reserved-only mode, then the removed node needs to be - // disconnected. 
- if let peersstate::Peer::Connected(peer) = self.data.peer(set_id.0, &peer_id) { - peer.disconnect(); - self.message_queue.push_back(Message::Drop { set_id, peer_id }); - } - } - - fn on_set_reserved_peers(&mut self, set_id: SetId, peer_ids: HashSet) { - // Determine the difference between the current group and the new list. - let (to_insert, to_remove) = { - let to_insert = peer_ids - .difference(&self.reserved_nodes[set_id.0].0) - .cloned() - .collect::>(); - let to_remove = self.reserved_nodes[set_id.0] - .0 - .difference(&peer_ids) - .cloned() - .collect::>(); - (to_insert, to_remove) - }; - - for node in to_insert { - self.on_add_reserved_peer(set_id, node); - } - - for node in to_remove { - self.on_remove_reserved_peer(set_id, node); - } - } - - fn on_set_reserved_only(&mut self, set_id: SetId, reserved_only: bool) { - self.reserved_nodes[set_id.0].1 = reserved_only; - - if reserved_only { - // Disconnect all the nodes that aren't reserved. - for peer_id in - self.data.connected_peers(set_id.0).cloned().collect::>().into_iter() - { - if self.reserved_nodes[set_id.0].0.contains(&peer_id) { - continue - } - - let peer = self.data.peer(set_id.0, &peer_id).into_connected().expect( - "We are enumerating connected peers, therefore the peer is connected; qed", - ); - peer.disconnect(); - self.message_queue.push_back(Message::Drop { set_id, peer_id }); - } - } else { - self.alloc_slots(set_id); - } - } - - /// Returns the list of reserved peers. - pub fn reserved_peers(&self, set_id: SetId) -> impl Iterator { - self.reserved_nodes[set_id.0].0.iter() - } - - /// Adds a node to the given set. The peerset will, if possible and not already the case, - /// try to connect to it. - /// - /// > **Note**: This has the same effect as [`PeersetHandle::add_to_peers_set`]. 
- pub fn add_to_peers_set(&mut self, set_id: SetId, peer_id: PeerId) { - if let peersstate::Peer::Unknown(entry) = self.data.peer(set_id.0, &peer_id) { - entry.discover(); - self.alloc_slots(set_id); - } - } - - fn on_remove_from_peers_set(&mut self, set_id: SetId, peer_id: PeerId) { - // Don't do anything if node is reserved. - if self.reserved_nodes[set_id.0].0.contains(&peer_id) { - return - } - - match self.data.peer(set_id.0, &peer_id) { - peersstate::Peer::Connected(peer) => { - self.message_queue.push_back(Message::Drop { set_id, peer_id: *peer.peer_id() }); - peer.disconnect().forget_peer(); - }, - peersstate::Peer::NotConnected(peer) => { - peer.forget_peer(); - }, - peersstate::Peer::Unknown(_) => {}, - } - } - - fn on_report_peer(&mut self, peer_id: PeerId, change: ReputationChange) { - // We want reputations to be up-to-date before adjusting them. - self.update_time(); - - let mut reputation = self.data.peer_reputation(peer_id); - reputation.add_reputation(change.value); - if reputation.reputation() >= BANNED_THRESHOLD { - trace!(target: "peerset", "Report {}: {:+} to {}. Reason: {}", - peer_id, change.value, reputation.reputation(), change.reason - ); - return - } - - debug!(target: "peerset", "Report {}: {:+} to {}. 
Reason: {}, Disconnecting", - peer_id, change.value, reputation.reputation(), change.reason - ); - - drop(reputation); - - for set_index in 0..self.data.num_sets() { - if let peersstate::Peer::Connected(peer) = self.data.peer(set_index, &peer_id) { - let peer = peer.disconnect(); - self.message_queue.push_back(Message::Drop { - set_id: SetId(set_index), - peer_id: peer.into_peer_id(), - }); - - self.alloc_slots(SetId(set_index)); - } - } - } - - fn on_peer_reputation(&mut self, peer_id: PeerId, pending_response: oneshot::Sender) { - let reputation = self.data.peer_reputation(peer_id); - let _ = pending_response.send(reputation.reputation()); - } - - /// Updates the value of `self.latest_time_update` and performs all the updates that happen - /// over time, such as reputation increases for staying connected. - fn update_time(&mut self) { - let now = Instant::now(); - - // We basically do `(now - self.latest_update).as_secs()`, except that by the way we do it - // we know that we're not going to miss seconds because of rounding to integers. - let secs_diff = { - let elapsed_latest = self.latest_time_update - self.created; - let elapsed_now = now - self.created; - self.latest_time_update = now; - elapsed_now.as_secs() - elapsed_latest.as_secs() - }; - - // For each elapsed second, move the node reputation towards zero. - // If we multiply each second the reputation by `k` (where `k` is between 0 and 1), it - // takes `ln(0.5) / ln(k)` seconds to reduce the reputation by half. Use this formula to - // empirically determine a value of `k` that looks correct. - for _ in 0..secs_diff { - for peer_id in self.data.peers().cloned().collect::>() { - // We use `k = 0.98`, so we divide by `50`. With that value, it takes 34.3 seconds - // to reduce the reputation by half. 
- fn reput_tick(reput: i32) -> i32 { - let mut diff = reput / 50; - if diff == 0 && reput < 0 { - diff = -1; - } else if diff == 0 && reput > 0 { - diff = 1; - } - reput.saturating_sub(diff) - } - - let mut peer_reputation = self.data.peer_reputation(peer_id); - - let before = peer_reputation.reputation(); - let after = reput_tick(before); - trace!(target: "peerset", "Fleeting {}: {} -> {}", peer_id, before, after); - peer_reputation.set_reputation(after); - - if after != 0 { - continue - } - - drop(peer_reputation); - - // If the peer reaches a reputation of 0, and there is no connection to it, - // forget it. - for set_index in 0..self.data.num_sets() { - match self.data.peer(set_index, &peer_id) { - peersstate::Peer::Connected(_) => {}, - peersstate::Peer::NotConnected(peer) => { - if peer.last_connected_or_discovered() + FORGET_AFTER < now { - peer.forget_peer(); - } - }, - peersstate::Peer::Unknown(_) => { - // Happens if this peer does not belong to this set. - }, - } - } - } - } - } - - /// Try to fill available out slots with nodes for the given set. - fn alloc_slots(&mut self, set_id: SetId) { - self.update_time(); - - // Try to connect to all the reserved nodes that we are not connected to. - for reserved_node in &self.reserved_nodes[set_id.0].0 { - let entry = match self.data.peer(set_id.0, reserved_node) { - peersstate::Peer::Unknown(n) => n.discover(), - peersstate::Peer::NotConnected(n) => n, - peersstate::Peer::Connected(_) => continue, - }; - - // Don't connect to nodes with an abysmal reputation, even if they're reserved. - // This is a rather opinionated behaviour, and it wouldn't be fundamentally wrong to - // remove that check. If necessary, the peerset should be refactored to give more - // control over what happens in that situation. 
- if entry.reputation() < BANNED_THRESHOLD { - break - } - - match entry.try_outgoing() { - Ok(conn) => self - .message_queue - .push_back(Message::Connect { set_id, peer_id: conn.into_peer_id() }), - Err(_) => { - // An error is returned only if no slot is available. Reserved nodes are - // marked in the state machine with a flag saying "doesn't occupy a slot", - // and as such this should never happen. - debug_assert!(false); - log::error!( - target: "peerset", - "Not enough slots to connect to reserved node" - ); - }, - } - } - - // Now, we try to connect to other nodes. - - // Nothing more to do if we're in reserved mode. - if self.reserved_nodes[set_id.0].1 { - return - } - - // Try to grab the next node to attempt to connect to. - // Since `highest_not_connected_peer` is rather expensive to call, check beforehand - // whether we have an available slot. - while self.data.has_free_outgoing_slot(set_id.0) { - let next = match self.data.highest_not_connected_peer(set_id.0) { - Some(n) => n, - None => break, - }; - - // Don't connect to nodes with an abysmal reputation. - if next.reputation() < BANNED_THRESHOLD { - break - } - - match next.try_outgoing() { - Ok(conn) => self - .message_queue - .push_back(Message::Connect { set_id, peer_id: conn.into_peer_id() }), - Err(_) => { - // This branch can only be entered if there is no free slot, which is - // checked above. - debug_assert!(false); - break - }, - } - } - } - - /// Indicate that we received an incoming connection. Must be answered either with - /// a corresponding `Accept` or `Reject`, except if we were already connected to this peer. - /// - /// Note that this mechanism is orthogonal to `Connect`/`Drop`. Accepting an incoming - /// connection implicitly means `Connect`, but incoming connections aren't cancelled by - /// `dropped`. 
- // Implementation note: because of concurrency issues, it is possible that we push a `Connect` - // message to the output channel with a `PeerId`, and that `incoming` gets called with the same - // `PeerId` before that message has been read by the user. In this situation we must not answer. - pub fn incoming(&mut self, set_id: SetId, peer_id: PeerId, index: IncomingIndex) { - trace!(target: "peerset", "Incoming {:?}", peer_id); - - self.update_time(); - - if self.reserved_nodes[set_id.0].1 && !self.reserved_nodes[set_id.0].0.contains(&peer_id) { - self.message_queue.push_back(Message::Reject(index)); - return - } - - let not_connected = match self.data.peer(set_id.0, &peer_id) { - // If we're already connected, don't answer, as the docs mention. - peersstate::Peer::Connected(_) => return, - peersstate::Peer::NotConnected(mut entry) => { - entry.bump_last_connected_or_discovered(); - entry - }, - peersstate::Peer::Unknown(entry) => entry.discover(), - }; - - if not_connected.reputation() < BANNED_THRESHOLD { - self.message_queue.push_back(Message::Reject(index)); - return - } - - match not_connected.try_accept_incoming() { - Ok(_) => self.message_queue.push_back(Message::Accept(index)), - Err(_) => self.message_queue.push_back(Message::Reject(index)), - } - } - - /// Indicate that we dropped an active connection with a peer, or that we failed to connect. - /// - /// Must only be called after the PSM has either generated a `Connect` message with this - /// `PeerId`, or accepted an incoming connection with this `PeerId`. - pub fn dropped(&mut self, set_id: SetId, peer_id: PeerId, reason: DropReason) { - // We want reputations to be up-to-date before adjusting them. - self.update_time(); - - match self.data.peer(set_id.0, &peer_id) { - peersstate::Peer::Connected(mut entry) => { - // Decrease the node's reputation so that we don't try it again and again and again. 
- entry.add_reputation(DISCONNECT_REPUTATION_CHANGE); - trace!(target: "peerset", "Dropping {}: {:+} to {}", - peer_id, DISCONNECT_REPUTATION_CHANGE, entry.reputation()); - entry.disconnect(); - }, - peersstate::Peer::NotConnected(_) | peersstate::Peer::Unknown(_) => { - error!(target: "peerset", "Received dropped() for non-connected node") - }, - } - - if let DropReason::Refused = reason { - self.on_remove_from_peers_set(set_id, peer_id); - } - - self.alloc_slots(set_id); - } - - /// Reports an adjustment to the reputation of the given peer. - pub fn report_peer(&mut self, peer_id: PeerId, score_diff: ReputationChange) { - // We don't immediately perform the adjustments in order to have state consistency. We - // don't want the reporting here to take priority over messages sent using the - // `PeersetHandle`. - let _ = self.tx.unbounded_send(Action::ReportPeer(peer_id, score_diff)); - } - - /// Produces a JSON object containing the state of the peerset manager, for debugging purposes. - pub fn debug_info(&mut self) -> serde_json::Value { - self.update_time(); - - json!({ - "sets": (0..self.data.num_sets()).map(|set_index| { - json!({ - "nodes": self.data.peers().cloned().collect::>().into_iter().filter_map(|peer_id| { - let state = match self.data.peer(set_index, &peer_id) { - peersstate::Peer::Connected(entry) => json!({ - "connected": true, - "reputation": entry.reputation() - }), - peersstate::Peer::NotConnected(entry) => json!({ - "connected": false, - "reputation": entry.reputation() - }), - peersstate::Peer::Unknown(_) => return None, - }; - - Some((peer_id.to_base58(), state)) - }).collect::>(), - "reserved_nodes": self.reserved_nodes[set_index].0.iter().map(|peer_id| { - peer_id.to_base58() - }).collect::>(), - "reserved_only": self.reserved_nodes[set_index].1, - }) - }).collect::>(), - "message_queue": self.message_queue.len(), - }) - } - - /// Returns the number of peers that we have discovered. 
- pub fn num_discovered_peers(&self) -> usize { - self.data.peers().len() - } -} - -impl Stream for Peerset { - type Item = Message; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - loop { - if let Some(message) = self.message_queue.pop_front() { - return Poll::Ready(Some(message)) - } - - if Future::poll(Pin::new(&mut self.next_periodic_alloc_slots), cx).is_ready() { - self.next_periodic_alloc_slots = Delay::new(Duration::new(1, 0)); - - for set_index in 0..self.data.num_sets() { - self.alloc_slots(SetId(set_index)); - } - } - - let action = match Stream::poll_next(Pin::new(&mut self.rx), cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(Some(event)) => event, - Poll::Ready(None) => return Poll::Pending, - }; - - match action { - Action::AddReservedPeer(set_id, peer_id) => - self.on_add_reserved_peer(set_id, peer_id), - Action::RemoveReservedPeer(set_id, peer_id) => - self.on_remove_reserved_peer(set_id, peer_id), - Action::SetReservedPeers(set_id, peer_ids) => - self.on_set_reserved_peers(set_id, peer_ids), - Action::SetReservedOnly(set_id, reserved) => - self.on_set_reserved_only(set_id, reserved), - Action::ReportPeer(peer_id, score_diff) => self.on_report_peer(peer_id, score_diff), - Action::AddToPeersSet(sets_name, peer_id) => - self.add_to_peers_set(sets_name, peer_id), - Action::RemoveFromPeersSet(sets_name, peer_id) => - self.on_remove_from_peers_set(sets_name, peer_id), - Action::PeerReputation(peer_id, pending_response) => - self.on_peer_reputation(peer_id, pending_response), - } - } - } -} - -/// Reason for calling [`Peerset::dropped`]. -pub enum DropReason { - /// Substream or connection has been closed for an unknown reason. - Unknown, - /// Substream or connection has been explicitly refused by the target. In other words, the - /// peer doesn't actually belong to this set. - /// - /// This has the side effect of calling [`PeersetHandle::remove_from_peers_set`]. 
- Refused, -} - -#[cfg(test)] -mod tests { - use super::{ - IncomingIndex, Message, Peerset, PeersetConfig, ReputationChange, SetConfig, SetId, - BANNED_THRESHOLD, - }; - use futures::prelude::*; - use libp2p_identity::PeerId; - use std::{pin::Pin, task::Poll, thread, time::Duration}; - - fn assert_messages(mut peerset: Peerset, messages: Vec) -> Peerset { - for expected_message in messages { - let (message, p) = next_message(peerset).expect("expected message"); - assert_eq!(message, expected_message); - peerset = p; - } - peerset - } - - fn next_message(mut peerset: Peerset) -> Result<(Message, Peerset), ()> { - let next = futures::executor::block_on_stream(&mut peerset).next(); - let message = next.ok_or(())?; - Ok((message, peerset)) - } - - #[test] - fn test_peerset_add_reserved_peer() { - let bootnode = PeerId::random(); - let reserved_peer = PeerId::random(); - let reserved_peer2 = PeerId::random(); - let config = PeersetConfig { - sets: vec![SetConfig { - in_peers: 0, - out_peers: 2, - bootnodes: vec![bootnode], - reserved_nodes: Default::default(), - reserved_only: true, - }], - }; - - let (peerset, handle) = Peerset::from_config(config); - handle.add_reserved_peer(SetId::from(0), reserved_peer); - handle.add_reserved_peer(SetId::from(0), reserved_peer2); - - assert_messages( - peerset, - vec![ - Message::Connect { set_id: SetId::from(0), peer_id: reserved_peer }, - Message::Connect { set_id: SetId::from(0), peer_id: reserved_peer2 }, - ], - ); - } - - #[test] - fn test_peerset_incoming() { - let bootnode = PeerId::random(); - let incoming = PeerId::random(); - let incoming2 = PeerId::random(); - let incoming3 = PeerId::random(); - let ii = IncomingIndex(1); - let ii2 = IncomingIndex(2); - let ii3 = IncomingIndex(3); - let ii4 = IncomingIndex(3); - let config = PeersetConfig { - sets: vec![SetConfig { - in_peers: 2, - out_peers: 1, - bootnodes: vec![bootnode], - reserved_nodes: Default::default(), - reserved_only: false, - }], - }; - - let (mut peerset, 
_handle) = Peerset::from_config(config); - peerset.incoming(SetId::from(0), incoming, ii); - peerset.incoming(SetId::from(0), incoming, ii4); - peerset.incoming(SetId::from(0), incoming2, ii2); - peerset.incoming(SetId::from(0), incoming3, ii3); - - assert_messages( - peerset, - vec![ - Message::Connect { set_id: SetId::from(0), peer_id: bootnode }, - Message::Accept(ii), - Message::Accept(ii2), - Message::Reject(ii3), - ], - ); - } - - #[test] - fn test_peerset_reject_incoming_in_reserved_only() { - let incoming = PeerId::random(); - let ii = IncomingIndex(1); - let config = PeersetConfig { - sets: vec![SetConfig { - in_peers: 50, - out_peers: 50, - bootnodes: vec![], - reserved_nodes: Default::default(), - reserved_only: true, - }], - }; - - let (mut peerset, _) = Peerset::from_config(config); - peerset.incoming(SetId::from(0), incoming, ii); - - assert_messages(peerset, vec![Message::Reject(ii)]); - } - - #[test] - fn test_peerset_discovered() { - let bootnode = PeerId::random(); - let discovered = PeerId::random(); - let discovered2 = PeerId::random(); - let config = PeersetConfig { - sets: vec![SetConfig { - in_peers: 0, - out_peers: 2, - bootnodes: vec![bootnode], - reserved_nodes: Default::default(), - reserved_only: false, - }], - }; - - let (mut peerset, _handle) = Peerset::from_config(config); - peerset.add_to_peers_set(SetId::from(0), discovered); - peerset.add_to_peers_set(SetId::from(0), discovered); - peerset.add_to_peers_set(SetId::from(0), discovered2); - - assert_messages( - peerset, - vec![ - Message::Connect { set_id: SetId::from(0), peer_id: bootnode }, - Message::Connect { set_id: SetId::from(0), peer_id: discovered }, - ], - ); - } - - #[test] - fn test_peerset_banned() { - let (mut peerset, handle) = Peerset::from_config(PeersetConfig { - sets: vec![SetConfig { - in_peers: 25, - out_peers: 25, - bootnodes: vec![], - reserved_nodes: Default::default(), - reserved_only: false, - }], - }); - - // We ban a node by setting its reputation under the 
threshold. - let peer_id = PeerId::random(); - handle.report_peer(peer_id, ReputationChange::new(BANNED_THRESHOLD - 1, "")); - - let fut = futures::future::poll_fn(move |cx| { - // We need one polling for the message to be processed. - assert_eq!(Stream::poll_next(Pin::new(&mut peerset), cx), Poll::Pending); - - // Check that an incoming connection from that node gets refused. - peerset.incoming(SetId::from(0), peer_id, IncomingIndex(1)); - if let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { - assert_eq!(msg.unwrap(), Message::Reject(IncomingIndex(1))); - } else { - panic!() - } - - // Wait a bit for the node's reputation to go above the threshold. - thread::sleep(Duration::from_millis(1500)); - - // Try again. This time the node should be accepted. - peerset.incoming(SetId::from(0), peer_id, IncomingIndex(2)); - while let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { - assert_eq!(msg.unwrap(), Message::Accept(IncomingIndex(2))); - } - - Poll::Ready(()) - }); - - futures::executor::block_on(fut); - } - - #[test] - fn test_relloc_after_banned() { - let (mut peerset, handle) = Peerset::from_config(PeersetConfig { - sets: vec![SetConfig { - in_peers: 25, - out_peers: 25, - bootnodes: vec![], - reserved_nodes: Default::default(), - reserved_only: false, - }], - }); - - // We ban a node by setting its reputation under the threshold. - let peer_id = PeerId::random(); - handle.report_peer(peer_id, ReputationChange::new(BANNED_THRESHOLD - 1, "")); - - let fut = futures::future::poll_fn(move |cx| { - // We need one polling for the message to be processed. - assert_eq!(Stream::poll_next(Pin::new(&mut peerset), cx), Poll::Pending); - - // Check that an incoming connection from that node gets refused. - // This is already tested in other tests, but it is done again here because it doesn't - // hurt. 
- peerset.incoming(SetId::from(0), peer_id, IncomingIndex(1)); - if let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { - assert_eq!(msg.unwrap(), Message::Reject(IncomingIndex(1))); - } else { - panic!() - } - - // Wait for the peerset to change its mind and actually connect to it. - while let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { - assert_eq!(msg.unwrap(), Message::Connect { set_id: SetId::from(0), peer_id }); - } - - Poll::Ready(()) - }); - - futures::executor::block_on(fut); - } -} diff --git a/client/peerset/src/peersstate.rs b/client/peerset/src/peersstate.rs deleted file mode 100644 index 2d4a9295c24c9..0000000000000 --- a/client/peerset/src/peersstate.rs +++ /dev/null @@ -1,737 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Reputation and slots allocation system behind the peerset. -//! -//! The [`PeersState`] state machine is responsible for managing the reputation and allocating -//! slots. It holds a list of nodes, each associated with a reputation value, a list of sets the -//! node belongs to, and for each set whether we are connected or not to this node. Thanks to this -//! list, it knows how many slots are occupied. 
It also holds a list of nodes which don't occupy -//! slots. -//! -//! > Note: This module is purely dedicated to managing slots and reputations. Features such as -//! > for example connecting to some nodes in priority should be added outside of this -//! > module, rather than inside. - -use libp2p_identity::PeerId; -use log::error; -use std::{ - borrow::Cow, - collections::{ - hash_map::{Entry, OccupiedEntry}, - HashMap, HashSet, - }, - time::Instant, -}; - -/// State storage behind the peerset. -/// -/// # Usage -/// -/// This struct is nothing more but a data structure containing a list of nodes, where each node -/// has a reputation and is either connected to us or not. -#[derive(Debug, Clone)] -pub struct PeersState { - /// List of nodes that we know about. - /// - /// > **Note**: This list should really be ordered by decreasing reputation, so that we can - /// > easily select the best node to connect to. As a first draft, however, we don't sort, to - /// > make the logic easier. - nodes: HashMap, - - /// Configuration of each set. The size of this `Vec` is never modified. - sets: Vec, -} - -/// Configuration of a single set. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct SetConfig { - /// Maximum allowed number of slot-occupying nodes for ingoing connections. - pub in_peers: u32, - - /// Maximum allowed number of slot-occupying nodes for outgoing connections. - pub out_peers: u32, -} - -/// State of a single set. -#[derive(Debug, Clone, PartialEq, Eq)] -struct SetInfo { - /// Number of slot-occupying nodes for which the `MembershipState` is `In`. - num_in: u32, - - /// Number of slot-occupying nodes for which the `MembershipState` is `In`. - num_out: u32, - - /// Maximum allowed number of slot-occupying nodes for which the `MembershipState` is `In`. - max_in: u32, - - /// Maximum allowed number of slot-occupying nodes for which the `MembershipState` is `Out`. 
- max_out: u32, - - /// List of node identities (discovered or not) that don't occupy slots. - /// - /// Note for future readers: this module is purely dedicated to managing slots. If you are - /// considering adding more features, please consider doing so outside of this module rather - /// than inside. - no_slot_nodes: HashSet, -} - -/// State of a single node that we know about. -#[derive(Debug, Clone, PartialEq, Eq)] -struct Node { - /// List of sets the node belongs to. - /// Always has a fixed size equal to the one of [`PeersState::set`]. The various possible sets - /// are indices into this `Vec`. - sets: Vec, - - /// Reputation value of the node, between `i32::MIN` (we hate that node) and - /// `i32::MAX` (we love that node). - reputation: i32, -} - -impl Node { - fn new(num_sets: usize) -> Self { - Self { sets: (0..num_sets).map(|_| MembershipState::NotMember).collect(), reputation: 0 } - } -} - -/// Whether we are connected to a node in the context of a specific set. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -enum MembershipState { - /// Node isn't part of that set. - NotMember, - /// We are connected through an ingoing connection. - In, - /// We are connected through an outgoing connection. - Out, - /// Node is part of that set, but we are not connected to it. - NotConnected { - /// When we were last connected to the node, or if we were never connected when we - /// discovered it. - last_connected: Instant, - }, -} - -impl MembershipState { - /// Returns `true` for [`MembershipState::In`] and [`MembershipState::Out`]. - fn is_connected(self) -> bool { - match self { - Self::In | Self::Out => true, - Self::NotMember | Self::NotConnected { .. } => false, - } - } - - /// Returns `true` for [`MembershipState::NotConnected`]. - fn is_not_connected(self) -> bool { - matches!(self, Self::NotConnected { .. }) - } -} - -impl PeersState { - /// Builds a new empty [`PeersState`]. 
- pub fn new(sets: impl IntoIterator) -> Self { - Self { - nodes: HashMap::new(), - sets: sets - .into_iter() - .map(|config| SetInfo { - num_in: 0, - num_out: 0, - max_in: config.in_peers, - max_out: config.out_peers, - no_slot_nodes: HashSet::new(), - }) - .collect(), - } - } - - /// Returns the number of sets. - /// - /// Corresponds to the number of elements passed to [`PeersState::new`]. - pub fn num_sets(&self) -> usize { - self.sets.len() - } - - /// Returns an object that grants access to the reputation value of a peer. - pub fn peer_reputation(&mut self, peer_id: PeerId) -> Reputation { - self.nodes.entry(peer_id).or_insert_with(|| Node::new(self.sets.len())); - - let entry = match self.nodes.entry(peer_id) { - Entry::Vacant(_) => unreachable!("guaranteed to be inserted above; qed"), - Entry::Occupied(e) => e, - }; - - Reputation { node: Some(entry) } - } - - /// Returns an object that grants access to the state of a peer in the context of a specific - /// set. - /// - /// # Panic - /// - /// `set` must be within range of the sets passed to [`PeersState::new`]. - pub fn peer<'a>(&'a mut self, set: usize, peer_id: &'a PeerId) -> Peer<'a> { - // The code below will panic anyway if this happens to be false, but this earlier assert - // makes it explicit what is wrong. - assert!(set < self.sets.len()); - - match self.nodes.get_mut(peer_id).map(|p| &p.sets[set]) { - None | Some(MembershipState::NotMember) => - Peer::Unknown(UnknownPeer { parent: self, set, peer_id: Cow::Borrowed(peer_id) }), - Some(MembershipState::In) | Some(MembershipState::Out) => - Peer::Connected(ConnectedPeer { state: self, set, peer_id: Cow::Borrowed(peer_id) }), - Some(MembershipState::NotConnected { .. }) => Peer::NotConnected(NotConnectedPeer { - state: self, - set, - peer_id: Cow::Borrowed(peer_id), - }), - } - } - - /// Returns the list of all the peers we know of. - // Note: this method could theoretically return a `Peer`, but implementing that - // isn't simple. 
- pub fn peers(&self) -> impl ExactSizeIterator { - self.nodes.keys() - } - - /// Returns the list of peers we are connected to in the context of a specific set. - /// - /// # Panic - /// - /// `set` must be within range of the sets passed to [`PeersState::new`]. - // Note: this method could theoretically return a `ConnectedPeer`, but implementing that - // isn't simple. - pub fn connected_peers(&self, set: usize) -> impl Iterator { - // The code below will panic anyway if this happens to be false, but this earlier assert - // makes it explicit what is wrong. - assert!(set < self.sets.len()); - - self.nodes - .iter() - .filter(move |(_, p)| p.sets[set].is_connected()) - .map(|(p, _)| p) - } - - /// Returns the peer with the highest reputation and that we are not connected to. - /// - /// If multiple nodes have the same reputation, which one is returned is unspecified. - /// - /// # Panic - /// - /// `set` must be within range of the sets passed to [`PeersState::new`]. - pub fn highest_not_connected_peer(&mut self, set: usize) -> Option { - // The code below will panic anyway if this happens to be false, but this earlier assert - // makes it explicit what is wrong. - assert!(set < self.sets.len()); - - let outcome = self - .nodes - .iter_mut() - .filter(|(_, Node { sets, .. })| sets[set].is_not_connected()) - .fold(None::<(&PeerId, &mut Node)>, |mut cur_node, to_try| { - if let Some(cur_node) = cur_node.take() { - if cur_node.1.reputation >= to_try.1.reputation { - return Some(cur_node) - } - } - Some(to_try) - }) - .map(|(peer_id, _)| *peer_id); - - outcome.map(move |peer_id| NotConnectedPeer { - state: self, - set, - peer_id: Cow::Owned(peer_id), - }) - } - - /// Returns `true` if there is a free outgoing slot available related to this set. - pub fn has_free_outgoing_slot(&self, set: usize) -> bool { - self.sets[set].num_out < self.sets[set].max_out - } - - /// Add a node to the list of nodes that don't occupy slots. 
- /// - /// Has no effect if the node was already in the group. - pub fn add_no_slot_node(&mut self, set: usize, peer_id: PeerId) { - // Reminder: `HashSet::insert` returns false if the node was already in the set - if !self.sets[set].no_slot_nodes.insert(peer_id) { - return - } - - if let Some(peer) = self.nodes.get_mut(&peer_id) { - match peer.sets[set] { - MembershipState::In => self.sets[set].num_in -= 1, - MembershipState::Out => self.sets[set].num_out -= 1, - MembershipState::NotConnected { .. } | MembershipState::NotMember => {}, - } - } - } - - /// Removes a node from the list of nodes that don't occupy slots. - /// - /// Has no effect if the node was not in the group. - pub fn remove_no_slot_node(&mut self, set: usize, peer_id: &PeerId) { - // Reminder: `HashSet::remove` returns false if the node was already not in the set - if !self.sets[set].no_slot_nodes.remove(peer_id) { - return - } - - if let Some(peer) = self.nodes.get_mut(peer_id) { - match peer.sets[set] { - MembershipState::In => self.sets[set].num_in += 1, - MembershipState::Out => self.sets[set].num_out += 1, - MembershipState::NotConnected { .. } | MembershipState::NotMember => {}, - } - } - } -} - -/// Grants access to the state of a peer in the [`PeersState`] in the context of a specific set. -pub enum Peer<'a> { - /// We are connected to this node. - Connected(ConnectedPeer<'a>), - /// We are not connected to this node. - NotConnected(NotConnectedPeer<'a>), - /// We have never heard of this node, or it is not part of the set. - Unknown(UnknownPeer<'a>), -} - -impl<'a> Peer<'a> { - /// If we are the `Connected` variant, returns the inner [`ConnectedPeer`]. Returns `None` - /// otherwise. - pub fn into_connected(self) -> Option> { - match self { - Self::Connected(peer) => Some(peer), - Self::NotConnected(..) | Self::Unknown(..) => None, - } - } - - /// If we are the `NotConnected` variant, returns the inner [`NotConnectedPeer`]. Returns `None` - /// otherwise. 
- #[cfg(test)] // Feel free to remove this if this function is needed outside of tests - pub fn into_not_connected(self) -> Option> { - match self { - Self::NotConnected(peer) => Some(peer), - Self::Connected(..) | Self::Unknown(..) => None, - } - } - - /// If we are the `Unknown` variant, returns the inner [`UnknownPeer`]. Returns `None` - /// otherwise. - #[cfg(test)] // Feel free to remove this if this function is needed outside of tests - pub fn into_unknown(self) -> Option> { - match self { - Self::Unknown(peer) => Some(peer), - Self::Connected(..) | Self::NotConnected(..) => None, - } - } -} - -/// A peer that is connected to us. -pub struct ConnectedPeer<'a> { - state: &'a mut PeersState, - set: usize, - peer_id: Cow<'a, PeerId>, -} - -impl<'a> ConnectedPeer<'a> { - /// Get the `PeerId` associated to this `ConnectedPeer`. - pub fn peer_id(&self) -> &PeerId { - &self.peer_id - } - - /// Destroys this `ConnectedPeer` and returns the `PeerId` inside of it. - pub fn into_peer_id(self) -> PeerId { - self.peer_id.into_owned() - } - - /// Switches the peer to "not connected". - pub fn disconnect(self) -> NotConnectedPeer<'a> { - let is_no_slot_occupy = self.state.sets[self.set].no_slot_nodes.contains(&*self.peer_id); - if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { - if !is_no_slot_occupy { - match node.sets[self.set] { - MembershipState::In => self.state.sets[self.set].num_in -= 1, - MembershipState::Out => self.state.sets[self.set].num_out -= 1, - MembershipState::NotMember | MembershipState::NotConnected { .. 
} => { - debug_assert!( - false, - "State inconsistency: disconnecting a disconnected node" - ) - }, - } - } - node.sets[self.set] = MembershipState::NotConnected { last_connected: Instant::now() }; - } else { - debug_assert!(false, "State inconsistency: disconnecting a disconnected node"); - } - - NotConnectedPeer { state: self.state, set: self.set, peer_id: self.peer_id } - } - - /// Performs an arithmetic addition on the reputation score of that peer. - /// - /// In case of overflow, the value will be capped. - /// - /// > **Note**: Reputation values aren't specific to a set but are global per peer. - pub fn add_reputation(&mut self, modifier: i32) { - if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { - node.reputation = node.reputation.saturating_add(modifier); - } else { - debug_assert!(false, "State inconsistency: add_reputation on an unknown node"); - } - } - - /// Returns the reputation value of the node. - /// - /// > **Note**: Reputation values aren't specific to a set but are global per peer. - pub fn reputation(&self) -> i32 { - self.state.nodes.get(&*self.peer_id).map_or(0, |p| p.reputation) - } -} - -/// A peer that is not connected to us. -#[derive(Debug)] -pub struct NotConnectedPeer<'a> { - state: &'a mut PeersState, - set: usize, - peer_id: Cow<'a, PeerId>, -} - -impl<'a> NotConnectedPeer<'a> { - /// Destroys this `NotConnectedPeer` and returns the `PeerId` inside of it. - pub fn into_peer_id(self) -> PeerId { - self.peer_id.into_owned() - } - - /// Bumps the value that `last_connected_or_discovered` would return to now, even if we - /// didn't connect or disconnect. 
- pub fn bump_last_connected_or_discovered(&mut self) { - let state = match self.state.nodes.get_mut(&*self.peer_id) { - Some(s) => s, - None => return, - }; - - if let MembershipState::NotConnected { last_connected } = &mut state.sets[self.set] { - *last_connected = Instant::now(); - } - } - - /// Returns when we were last connected to this peer, or when we discovered it if we were - /// never connected. - /// - /// Guaranteed to be earlier than calling `Instant::now()` after the function returns. - pub fn last_connected_or_discovered(&self) -> Instant { - let state = match self.state.nodes.get(&*self.peer_id) { - Some(s) => s, - None => { - error!( - target: "peerset", - "State inconsistency with {}; not connected after borrow", - self.peer_id - ); - return Instant::now() - }, - }; - - match state.sets[self.set] { - MembershipState::NotConnected { last_connected } => last_connected, - _ => { - error!(target: "peerset", "State inconsistency with {}", self.peer_id); - Instant::now() - }, - } - } - - /// Tries to set the peer as connected as an outgoing connection. - /// - /// If there are enough slots available, switches the node to "connected" and returns `Ok`. If - /// the slots are full, the node stays "not connected" and we return `Err`. - /// - /// Non-slot-occupying nodes don't count towards the number of slots. - pub fn try_outgoing(self) -> Result, Self> { - let is_no_slot_occupy = self.state.sets[self.set].no_slot_nodes.contains(&*self.peer_id); - - // Note that it is possible for num_out to be strictly superior to the max, in case we were - // connected to reserved node then marked them as not reserved. 
- if !self.state.has_free_outgoing_slot(self.set) && !is_no_slot_occupy { - return Err(self) - } - - if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { - peer.sets[self.set] = MembershipState::Out; - if !is_no_slot_occupy { - self.state.sets[self.set].num_out += 1; - } - } else { - debug_assert!(false, "State inconsistency: try_outgoing on an unknown node"); - } - - Ok(ConnectedPeer { state: self.state, set: self.set, peer_id: self.peer_id }) - } - - /// Tries to accept the peer as an incoming connection. - /// - /// If there are enough slots available, switches the node to "connected" and returns `Ok`. If - /// the slots are full, the node stays "not connected" and we return `Err`. - /// - /// Non-slot-occupying nodes don't count towards the number of slots. - pub fn try_accept_incoming(self) -> Result, Self> { - let is_no_slot_occupy = self.state.sets[self.set].no_slot_nodes.contains(&*self.peer_id); - - // Note that it is possible for num_in to be strictly superior to the max, in case we were - // connected to reserved node then marked them as not reserved. - if self.state.sets[self.set].num_in >= self.state.sets[self.set].max_in && - !is_no_slot_occupy - { - return Err(self) - } - - if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { - peer.sets[self.set] = MembershipState::In; - if !is_no_slot_occupy { - self.state.sets[self.set].num_in += 1; - } - } else { - debug_assert!(false, "State inconsistency: try_accept_incoming on an unknown node"); - } - - Ok(ConnectedPeer { state: self.state, set: self.set, peer_id: self.peer_id }) - } - - /// Returns the reputation value of the node. - /// - /// > **Note**: Reputation values aren't specific to a set but are global per peer. - pub fn reputation(&self) -> i32 { - self.state.nodes.get(&*self.peer_id).map_or(0, |p| p.reputation) - } - - /// Sets the reputation of the peer. - /// - /// > **Note**: Reputation values aren't specific to a set but are global per peer. 
- #[cfg(test)] // Feel free to remove this if this function is needed outside of tests - pub fn set_reputation(&mut self, value: i32) { - if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { - node.reputation = value; - } else { - debug_assert!(false, "State inconsistency: set_reputation on an unknown node"); - } - } - - /// Removes the peer from the list of members of the set. - pub fn forget_peer(self) -> UnknownPeer<'a> { - if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { - debug_assert!(!matches!(peer.sets[self.set], MembershipState::NotMember)); - peer.sets[self.set] = MembershipState::NotMember; - - // Remove the peer from `self.state.nodes` entirely if it isn't a member of any set. - if peer.reputation == 0 && - peer.sets.iter().all(|set| matches!(set, MembershipState::NotMember)) - { - self.state.nodes.remove(&*self.peer_id); - } - } else { - debug_assert!(false, "State inconsistency: forget_peer on an unknown node"); - error!( - target: "peerset", - "State inconsistency with {} when forgetting peer", - self.peer_id - ); - }; - - UnknownPeer { parent: self.state, set: self.set, peer_id: self.peer_id } - } -} - -/// A peer that we have never heard of or that isn't part of the set. -pub struct UnknownPeer<'a> { - parent: &'a mut PeersState, - set: usize, - peer_id: Cow<'a, PeerId>, -} - -impl<'a> UnknownPeer<'a> { - /// Inserts the peer identity in our list. - /// - /// The node starts with a reputation of 0. You can adjust these default - /// values using the `NotConnectedPeer` that this method returns. - pub fn discover(self) -> NotConnectedPeer<'a> { - let num_sets = self.parent.sets.len(); - - self.parent - .nodes - .entry(self.peer_id.clone().into_owned()) - .or_insert_with(|| Node::new(num_sets)) - .sets[self.set] = MembershipState::NotConnected { last_connected: Instant::now() }; - - NotConnectedPeer { state: self.parent, set: self.set, peer_id: self.peer_id } - } -} - -/// Access to the reputation of a peer. 
-pub struct Reputation<'a> { - /// Node entry in [`PeersState::nodes`]. Always `Some` except right before dropping. - node: Option>, -} - -impl<'a> Reputation<'a> { - /// Returns the reputation value of the node. - pub fn reputation(&self) -> i32 { - self.node.as_ref().unwrap().get().reputation - } - - /// Sets the reputation of the peer. - pub fn set_reputation(&mut self, value: i32) { - self.node.as_mut().unwrap().get_mut().reputation = value; - } - - /// Performs an arithmetic addition on the reputation score of that peer. - /// - /// In case of overflow, the value will be capped. - pub fn add_reputation(&mut self, modifier: i32) { - let reputation = &mut self.node.as_mut().unwrap().get_mut().reputation; - *reputation = reputation.saturating_add(modifier); - } -} - -impl<'a> Drop for Reputation<'a> { - fn drop(&mut self) { - if let Some(node) = self.node.take() { - if node.get().reputation == 0 && - node.get().sets.iter().all(|set| matches!(set, MembershipState::NotMember)) - { - node.remove(); - } - } - } -} - -#[cfg(test)] -mod tests { - use super::{Peer, PeersState, SetConfig}; - use libp2p_identity::PeerId; - use std::iter; - - #[test] - fn full_slots_in() { - let mut peers_state = PeersState::new(iter::once(SetConfig { in_peers: 1, out_peers: 1 })); - let id1 = PeerId::random(); - let id2 = PeerId::random(); - - if let Peer::Unknown(e) = peers_state.peer(0, &id1) { - assert!(e.discover().try_accept_incoming().is_ok()); - } - - if let Peer::Unknown(e) = peers_state.peer(0, &id2) { - assert!(e.discover().try_accept_incoming().is_err()); - } - } - - #[test] - fn no_slot_node_doesnt_use_slot() { - let mut peers_state = PeersState::new(iter::once(SetConfig { in_peers: 1, out_peers: 1 })); - let id1 = PeerId::random(); - let id2 = PeerId::random(); - - peers_state.add_no_slot_node(0, id1); - if let Peer::Unknown(p) = peers_state.peer(0, &id1) { - assert!(p.discover().try_accept_incoming().is_ok()); - } else { - panic!() - } - - if let Peer::Unknown(e) = 
peers_state.peer(0, &id2) { - assert!(e.discover().try_accept_incoming().is_ok()); - } else { - panic!() - } - } - - #[test] - fn disconnecting_frees_slot() { - let mut peers_state = PeersState::new(iter::once(SetConfig { in_peers: 1, out_peers: 1 })); - let id1 = PeerId::random(); - let id2 = PeerId::random(); - - assert!(peers_state - .peer(0, &id1) - .into_unknown() - .unwrap() - .discover() - .try_accept_incoming() - .is_ok()); - assert!(peers_state - .peer(0, &id2) - .into_unknown() - .unwrap() - .discover() - .try_accept_incoming() - .is_err()); - peers_state.peer(0, &id1).into_connected().unwrap().disconnect(); - assert!(peers_state - .peer(0, &id2) - .into_not_connected() - .unwrap() - .try_accept_incoming() - .is_ok()); - } - - #[test] - fn highest_not_connected_peer() { - let mut peers_state = - PeersState::new(iter::once(SetConfig { in_peers: 25, out_peers: 25 })); - let id1 = PeerId::random(); - let id2 = PeerId::random(); - - assert!(peers_state.highest_not_connected_peer(0).is_none()); - peers_state.peer(0, &id1).into_unknown().unwrap().discover().set_reputation(50); - peers_state.peer(0, &id2).into_unknown().unwrap().discover().set_reputation(25); - assert_eq!(peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), Some(id1)); - peers_state.peer(0, &id2).into_not_connected().unwrap().set_reputation(75); - assert_eq!(peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), Some(id2)); - peers_state - .peer(0, &id2) - .into_not_connected() - .unwrap() - .try_accept_incoming() - .unwrap(); - assert_eq!(peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), Some(id1)); - peers_state.peer(0, &id1).into_not_connected().unwrap().set_reputation(100); - peers_state.peer(0, &id2).into_connected().unwrap().disconnect(); - assert_eq!(peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), Some(id1)); - peers_state.peer(0, &id1).into_not_connected().unwrap().set_reputation(-100); - 
assert_eq!(peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), Some(id2)); - } - - #[test] - fn disconnect_no_slot_doesnt_panic() { - let mut peers_state = PeersState::new(iter::once(SetConfig { in_peers: 1, out_peers: 1 })); - let id = PeerId::random(); - peers_state.add_no_slot_node(0, id); - let peer = peers_state - .peer(0, &id) - .into_unknown() - .unwrap() - .discover() - .try_outgoing() - .unwrap(); - peer.disconnect(); - } -} diff --git a/client/peerset/tests/fuzz.rs b/client/peerset/tests/fuzz.rs deleted file mode 100644 index 122f17062577d..0000000000000 --- a/client/peerset/tests/fuzz.rs +++ /dev/null @@ -1,176 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use futures::prelude::*; -use libp2p_identity::PeerId; -use rand::{ - distributions::{Distribution, Uniform, WeightedIndex}, - seq::IteratorRandom, -}; -use sc_peerset::{ - DropReason, IncomingIndex, Message, Peerset, PeersetConfig, ReputationChange, SetConfig, SetId, -}; -use std::{ - collections::{HashMap, HashSet}, - pin::Pin, - task::Poll, -}; - -#[test] -fn run() { - for _ in 0..50 { - test_once(); - } -} - -fn test_once() { - // PRNG to use. - let mut rng = rand::thread_rng(); - - // Nodes that the peerset knows about. 
- let mut known_nodes = HashSet::::new(); - // Nodes that we have reserved. Always a subset of `known_nodes`. - let mut reserved_nodes = HashSet::::new(); - - let (mut peerset, peerset_handle) = Peerset::from_config(PeersetConfig { - sets: vec![SetConfig { - bootnodes: (0..Uniform::new_inclusive(0, 4).sample(&mut rng)) - .map(|_| { - let id = PeerId::random(); - known_nodes.insert(id); - id - }) - .collect(), - reserved_nodes: { - (0..Uniform::new_inclusive(0, 2).sample(&mut rng)) - .map(|_| { - let id = PeerId::random(); - known_nodes.insert(id); - reserved_nodes.insert(id); - id - }) - .collect() - }, - in_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), - out_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), - reserved_only: Uniform::new_inclusive(0, 10).sample(&mut rng) == 0, - }], - }); - - futures::executor::block_on(futures::future::poll_fn(move |cx| { - // List of nodes the user of `peerset` assumes it's connected to. Always a subset of - // `known_nodes`. - let mut connected_nodes = HashSet::::new(); - // List of nodes the user of `peerset` called `incoming` with and that haven't been - // accepted or rejected yet. - let mut incoming_nodes = HashMap::::new(); - // Next id for incoming connections. - let mut next_incoming_id = IncomingIndex(0); - - // Perform a certain number of actions while checking that the state is consistent. If we - // reach the end of the loop, the run has succeeded. - for _ in 0..2500 { - // Each of these weights corresponds to an action that we may perform. - let action_weights = [150, 90, 90, 30, 30, 1, 1, 4, 4]; - match WeightedIndex::new(&action_weights).unwrap().sample(&mut rng) { - // If we generate 0, poll the peerset. - 0 => match Stream::poll_next(Pin::new(&mut peerset), cx) { - Poll::Ready(Some(Message::Connect { peer_id, .. 
})) => { - if let Some(id) = - incoming_nodes.iter().find(|(_, v)| **v == peer_id).map(|(&id, _)| id) - { - incoming_nodes.remove(&id); - } - assert!(connected_nodes.insert(peer_id)); - }, - Poll::Ready(Some(Message::Drop { peer_id, .. })) => { - connected_nodes.remove(&peer_id); - }, - Poll::Ready(Some(Message::Accept(n))) => { - assert!(connected_nodes.insert(incoming_nodes.remove(&n).unwrap())) - }, - Poll::Ready(Some(Message::Reject(n))) => { - assert!(!connected_nodes.contains(&incoming_nodes.remove(&n).unwrap())) - }, - Poll::Ready(None) => panic!(), - Poll::Pending => {}, - }, - - // If we generate 1, discover a new node. - 1 => { - let new_id = PeerId::random(); - known_nodes.insert(new_id); - peerset.add_to_peers_set(SetId::from(0), new_id); - }, - - // If we generate 2, adjust a random reputation. - 2 => - if let Some(id) = known_nodes.iter().choose(&mut rng) { - let val = Uniform::new_inclusive(i32::MIN, i32::MAX).sample(&mut rng); - peerset_handle.report_peer(*id, ReputationChange::new(val, "")); - }, - - // If we generate 3, disconnect from a random node. - 3 => - if let Some(id) = connected_nodes.iter().choose(&mut rng).cloned() { - connected_nodes.remove(&id); - peerset.dropped(SetId::from(0), id, DropReason::Unknown); - }, - - // If we generate 4, connect to a random node. - 4 => { - if let Some(id) = known_nodes - .iter() - .filter(|n| { - incoming_nodes.values().all(|m| m != *n) && - !connected_nodes.contains(*n) - }) - .choose(&mut rng) - { - peerset.incoming(SetId::from(0), *id, next_incoming_id); - incoming_nodes.insert(next_incoming_id, *id); - next_incoming_id.0 += 1; - } - }, - - // 5 and 6 are the reserved-only mode. - 5 => peerset_handle.set_reserved_only(SetId::from(0), true), - 6 => peerset_handle.set_reserved_only(SetId::from(0), false), - - // 7 and 8 are about switching a random node in or out of reserved mode. 
- 7 => { - if let Some(id) = - known_nodes.iter().filter(|n| !reserved_nodes.contains(*n)).choose(&mut rng) - { - peerset_handle.add_reserved_peer(SetId::from(0), *id); - reserved_nodes.insert(*id); - } - }, - 8 => - if let Some(id) = reserved_nodes.iter().choose(&mut rng).cloned() { - reserved_nodes.remove(&id); - peerset_handle.remove_reserved_peer(SetId::from(0), id); - }, - - _ => unreachable!(), - } - } - - Poll::Ready(()) - })); -} diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index ba18a1d4fc70d..5efb10f719c20 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -13,15 +13,15 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } serde_json = "1.0.85" thiserror = "1.0" sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } sp-rpc = { version = "6.0.0", path = "../../primitives/rpc" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } -sp-version = { version = "5.0.0", path = "../../primitives/version" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } +sp-version = { version = "22.0.0", path = "../../primitives/version" } jsonrpsee = { version = "0.16.2", features = ["server", "client-core", "macros"] } diff --git a/client/rpc-api/src/author/error.rs b/client/rpc-api/src/author/error.rs index 8149a1f8d1afe..648dbb295d8d0 100644 --- a/client/rpc-api/src/author/error.rs +++ b/client/rpc-api/src/author/error.rs @@ 
-57,7 +57,7 @@ pub enum Error { } /// Base code for all authorship errors. -const BASE_ERROR: i32 = 1000; +const BASE_ERROR: i32 = crate::error::base::AUTHOR; /// Extrinsic has an invalid format. const BAD_FORMAT: i32 = BASE_ERROR + 1; /// Error during transaction verification in runtime. diff --git a/client/rpc-api/src/chain/error.rs b/client/rpc-api/src/chain/error.rs index cfb429bcffd12..6521929425888 100644 --- a/client/rpc-api/src/chain/error.rs +++ b/client/rpc-api/src/chain/error.rs @@ -37,7 +37,7 @@ pub enum Error { } /// Base error code for all chain errors. -const BASE_ERROR: i32 = 3000; +const BASE_ERROR: i32 = crate::error::base::CHAIN; impl From for JsonRpseeError { fn from(e: Error) -> Self { diff --git a/client/rpc-api/src/dev/error.rs b/client/rpc-api/src/dev/error.rs index 2896e66bc0a35..8e4ddb55e35d7 100644 --- a/client/rpc-api/src/dev/error.rs +++ b/client/rpc-api/src/dev/error.rs @@ -44,7 +44,7 @@ pub enum Error { } /// Base error code for all dev errors. -const BASE_ERROR: i32 = 6000; +const BASE_ERROR: i32 = crate::error::base::DEV; impl From for JsonRpseeError { fn from(e: Error) -> Self { diff --git a/client/rpc-api/src/error.rs b/client/rpc-api/src/error.rs new file mode 100644 index 0000000000000..72941e3145b94 --- /dev/null +++ b/client/rpc-api/src/error.rs @@ -0,0 +1,28 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +/// Base error code for RPC modules. +pub mod base { + pub const AUTHOR: i32 = 1000; + pub const SYSTEM: i32 = 2000; + pub const CHAIN: i32 = 3000; + pub const STATE: i32 = 4000; + pub const OFFCHAIN: i32 = 5000; + pub const DEV: i32 = 6000; + pub const STATEMENT: i32 = 7000; +} diff --git a/client/rpc-api/src/lib.rs b/client/rpc-api/src/lib.rs index 83054584370a1..b99c237dc859b 100644 --- a/client/rpc-api/src/lib.rs +++ b/client/rpc-api/src/lib.rs @@ -22,6 +22,7 @@ #![warn(missing_docs)] +mod error; mod policy; pub use policy::DenyUnsafe; diff --git a/client/rpc-api/src/offchain/error.rs b/client/rpc-api/src/offchain/error.rs index 5ca0476087a5c..679e100089734 100644 --- a/client/rpc-api/src/offchain/error.rs +++ b/client/rpc-api/src/offchain/error.rs @@ -38,7 +38,7 @@ pub enum Error { } /// Base error code for all offchain errors. -const BASE_ERROR: i32 = 5000; +const BASE_ERROR: i32 = crate::error::base::OFFCHAIN; impl From for JsonRpseeError { fn from(e: Error) -> Self { diff --git a/client/rpc-api/src/state/error.rs b/client/rpc-api/src/state/error.rs index c69b3d9199ce6..9857784e3545c 100644 --- a/client/rpc-api/src/state/error.rs +++ b/client/rpc-api/src/state/error.rs @@ -55,7 +55,7 @@ pub enum Error { } /// Base code for all state errors. -const BASE_ERROR: i32 = 4000; +const BASE_ERROR: i32 = crate::error::base::STATE; impl From for JsonRpseeError { fn from(e: Error) -> Self { diff --git a/client/rpc-api/src/statement/error.rs b/client/rpc-api/src/statement/error.rs index 549b147115fb2..8438cc3ec9e94 100644 --- a/client/rpc-api/src/statement/error.rs +++ b/client/rpc-api/src/statement/error.rs @@ -38,7 +38,7 @@ pub enum Error { } /// Base error code for all statement errors. 
-const BASE_ERROR: i32 = 6000; +const BASE_ERROR: i32 = crate::error::base::STATEMENT; impl From for JsonRpseeError { fn from(e: Error) -> Self { diff --git a/client/rpc-api/src/system/error.rs b/client/rpc-api/src/system/error.rs index 4ad0f1b690a19..713ade9210d32 100644 --- a/client/rpc-api/src/system/error.rs +++ b/client/rpc-api/src/system/error.rs @@ -39,7 +39,7 @@ pub enum Error { } // Base code for all system errors. -const BASE_ERROR: i32 = 2000; +const BASE_ERROR: i32 = crate::error::base::SYSTEM; // Provided block range couldn't be resolved to a list of blocks. const NOT_HEALTHY_ERROR: i32 = BASE_ERROR + 1; // Peer argument is malformatted. diff --git a/client/rpc-spec-v2/Cargo.toml b/client/rpc-spec-v2/Cargo.toml index 1f0cac18d324e..599596777b7b6 100644 --- a/client/rpc-spec-v2/Cargo.toml +++ b/client/rpc-spec-v2/Cargo.toml @@ -18,20 +18,22 @@ jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } # Pool for submitting extrinsics required by "transaction" sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-version = { version = "5.0.0", path = "../../primitives/version" } +sp-version = { version = "22.0.0", path = "../../primitives/version" } sc-client-api = { version = "4.0.0-dev", path = "../api" } -codec = { package = "parity-scale-codec", version = "3.2.2" } +sc-utils = { version = "4.0.0-dev", path = "../utils" } +codec = { package = "parity-scale-codec", version = "3.6.1" } thiserror = "1.0" serde = "1.0" 
hex = "0.4" futures = "0.3.21" parking_lot = "0.12.1" tokio-stream = { version = "0.1", features = ["sync"] } -array-bytes = "4.1" +tokio = { version = "1.22.0", features = ["sync"] } +array-bytes = "6.1" log = "0.4.17" futures-util = { version = "0.3.19", default-features = false } @@ -44,5 +46,5 @@ sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/comm sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../primitives/maybe-compressed-blob" } sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-service = { version = "0.10.0-dev", features = ["test-helpers"], path = "../service" } -sc-utils = { version = "4.0.0-dev", path = "../utils" } assert_matches = "1.3.0" +pretty_assertions = "1.2.1" diff --git a/client/rpc-spec-v2/src/chain_head/api.rs b/client/rpc-spec-v2/src/chain_head/api.rs index ad1e58500d51b..c002b75efe037 100644 --- a/client/rpc-spec-v2/src/chain_head/api.rs +++ b/client/rpc-spec-v2/src/chain_head/api.rs @@ -19,7 +19,7 @@ #![allow(non_snake_case)] //! API trait of the chain head. -use crate::chain_head::event::{ChainHeadEvent, FollowEvent, NetworkConfig}; +use crate::chain_head::event::{FollowEvent, MethodResponse, StorageQuery}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; #[rpc(client, server)] @@ -34,7 +34,7 @@ pub trait ChainHeadApi { unsubscribe = "chainHead_unstable_unfollow", item = FollowEvent, )] - fn chain_head_unstable_follow(&self, runtime_updates: bool); + fn chain_head_unstable_follow(&self, with_runtime: bool); /// Retrieves the body (list of transactions) of a pinned block. /// @@ -47,17 +47,12 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. 
- #[subscription( - name = "chainHead_unstable_body", - unsubscribe = "chainHead_unstable_stopBody", - item = ChainHeadEvent, - )] + #[method(name = "chainHead_unstable_body", blocking)] fn chain_head_unstable_body( &self, follow_subscription: String, hash: Hash, - network_config: Option, - ); + ) -> RpcResult; /// Retrieves the header of a pinned block. /// @@ -86,43 +81,33 @@ pub trait ChainHeadApi { #[method(name = "chainHead_unstable_genesisHash", blocking)] fn chain_head_unstable_genesis_hash(&self) -> RpcResult; - /// Return a storage entry at a specific block's state. + /// Returns storage entries at a specific block's state. /// /// # Unstable /// /// This method is unstable and subject to change in the future. - #[subscription( - name = "chainHead_unstable_storage", - unsubscribe = "chainHead_unstable_stopStorage", - item = ChainHeadEvent, - )] + #[method(name = "chainHead_unstable_storage", blocking)] fn chain_head_unstable_storage( &self, follow_subscription: String, hash: Hash, - key: String, - child_key: Option, - network_config: Option, - ); + items: Vec>, + child_trie: Option, + ) -> RpcResult; /// Call into the Runtime API at a specified block's state. /// /// # Unstable /// /// This method is unstable and subject to change in the future. - #[subscription( - name = "chainHead_unstable_call", - unsubscribe = "chainHead_unstable_stopCall", - item = ChainHeadEvent, - )] + #[method(name = "chainHead_unstable_call", blocking)] fn chain_head_unstable_call( &self, follow_subscription: String, hash: Hash, function: String, call_parameters: String, - network_config: Option, - ); + ) -> RpcResult; /// Unpin a block reported by the `follow` method. /// diff --git a/client/rpc-spec-v2/src/chain_head/chain_head.rs b/client/rpc-spec-v2/src/chain_head/chain_head.rs index 763fc5d9acc5d..79cf251f18068 100644 --- a/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -18,12 +18,17 @@ //! 
API implementation for `chainHead`. +use super::{ + chain_head_storage::ChainHeadStorage, + event::{MethodResponseStarted, OperationBodyDone, OperationCallDone}, +}; use crate::{ chain_head::{ api::ChainHeadApiServer, chain_head_follow::ChainHeadFollower, error::Error as ChainHeadRpcError, - event::{ChainHeadEvent, ChainHeadResult, ErrorEvent, FollowEvent, NetworkConfig}, + event::{FollowEvent, MethodResponse, OperationError, StorageQuery, StorageQueryType}, + hex_string, subscription::{SubscriptionManagement, SubscriptionManagementError}, }, SubscriptionTaskExecutor, @@ -42,12 +47,47 @@ use sc_client_api::{ }; use sp_api::CallApiAt; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; -use sp_core::{hexdisplay::HexDisplay, storage::well_known_keys, traits::CallContext, Bytes}; +use sp_core::{traits::CallContext, Bytes}; use sp_runtime::traits::Block as BlockT; use std::{marker::PhantomData, sync::Arc, time::Duration}; pub(crate) const LOG_TARGET: &str = "rpc-spec-v2"; +/// The configuration of [`ChainHead`]. +pub struct ChainHeadConfig { + /// The maximum number of pinned blocks across all subscriptions. + pub global_max_pinned_blocks: usize, + /// The maximum duration that a block is allowed to be pinned per subscription. + pub subscription_max_pinned_duration: Duration, + /// The maximum number of ongoing operations per subscription. + pub subscription_max_ongoing_operations: usize, +} + +/// Maximum pinned blocks across all connections. +/// This number is large enough to consider immediate blocks. +/// Note: This should never exceed the `PINNING_CACHE_SIZE` from client/db. +const MAX_PINNED_BLOCKS: usize = 512; + +/// Any block of any subscription should not be pinned more than +/// this constant. When a subscription contains a block older than this, +/// the subscription becomes subject to termination. +/// Note: This should be enough for immediate blocks. 
+const MAX_PINNED_DURATION: Duration = Duration::from_secs(60); + +/// The maximum number of ongoing operations per subscription. +/// Note: The lower limit imposed by the spec is 16. +const MAX_ONGOING_OPERATIONS: usize = 16; + +impl Default for ChainHeadConfig { + fn default() -> Self { + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: MAX_PINNED_DURATION, + subscription_max_ongoing_operations: MAX_ONGOING_OPERATIONS, + } + } +} + /// An API for chain head RPC calls. pub struct ChainHead, Block: BlockT, Client> { /// Substrate client. @@ -71,18 +111,17 @@ impl, Block: BlockT, Client> ChainHead { backend: Arc, executor: SubscriptionTaskExecutor, genesis_hash: GenesisHash, - max_pinned_blocks: usize, - max_pinned_duration: Duration, + config: ChainHeadConfig, ) -> Self { - let genesis_hash = format!("0x{:?}", HexDisplay::from(&genesis_hash.as_ref())); - + let genesis_hash = hex_string(&genesis_hash.as_ref()); Self { client, backend: backend.clone(), executor, subscriptions: Arc::new(SubscriptionManagement::new( - max_pinned_blocks, - max_pinned_duration, + config.global_max_pinned_blocks, + config.subscription_max_pinned_duration, + config.subscription_max_ongoing_operations, backend, )), genesis_hash, @@ -115,11 +154,8 @@ impl, Block: BlockT, Client> ChainHead { /// Parse hex-encoded string parameter as raw bytes. /// -/// If the parsing fails, the subscription is rejected. -fn parse_hex_param( - sink: &mut SubscriptionSink, - param: String, -) -> Result, SubscriptionEmptyError> { +/// If the parsing fails, returns an error propagated to the RPC method. +fn parse_hex_param(param: String) -> Result, ChainHeadRpcError> { // Methods can accept empty parameters. 
if param.is_empty() { return Ok(Default::default()) @@ -127,10 +163,7 @@ fn parse_hex_param( match array_bytes::hex2bytes(¶m) { Ok(bytes) => Ok(bytes), - Err(_) => { - let _ = sink.reject(ChainHeadRpcError::InvalidParam(param)); - Err(SubscriptionEmptyError) - }, + Err(_) => Err(ChainHeadRpcError::InvalidParam(param)), } } @@ -152,7 +185,7 @@ where fn chain_head_unstable_follow( &self, mut sink: SubscriptionSink, - runtime_updates: bool, + with_runtime: bool, ) -> SubscriptionResult { let sub_id = match self.accept_subscription(&mut sink) { Ok(sub_id) => sub_id, @@ -162,7 +195,8 @@ where }, }; // Keep track of the subscription. - let Some(rx_stop) = self.subscriptions.insert_subscription(sub_id.clone(), runtime_updates) else { + let Some(sub_data) = self.subscriptions.insert_subscription(sub_id.clone(), with_runtime) + else { // Inserting the subscription can only fail if the JsonRPSee // generated a duplicate subscription ID. debug!(target: LOG_TARGET, "[follow][id={:?}] Subscription already accepted", sub_id); @@ -179,11 +213,11 @@ where client, backend, subscriptions.clone(), - runtime_updates, + with_runtime, sub_id.clone(), ); - chain_head_follow.generate_events(sink, rx_stop).await; + chain_head_follow.generate_events(sink, sub_data).await; subscriptions.remove_subscription(&sub_id); debug!(target: LOG_TARGET, "[follow][id={:?}] Subscription removed", sub_id); @@ -195,60 +229,55 @@ where fn chain_head_unstable_body( &self, - mut sink: SubscriptionSink, follow_subscription: String, hash: Block::Hash, - _network_config: Option, - ) -> SubscriptionResult { - let client = self.client.clone(); - let subscriptions = self.subscriptions.clone(); - - let block_guard = match subscriptions.lock_block(&follow_subscription, hash) { + ) -> RpcResult { + let block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { Ok(block) => block, - Err(SubscriptionManagementError::SubscriptionAbsent) => { - // Invalid invalid subscription ID. 
- let _ = sink.send(&ChainHeadEvent::::Disjoint); - return Ok(()) - }, + Err(SubscriptionManagementError::SubscriptionAbsent) | + Err(SubscriptionManagementError::ExceededLimits) => return Ok(MethodResponse::LimitReached), Err(SubscriptionManagementError::BlockHashAbsent) => { // Block is not part of the subscription. - let _ = sink.reject(ChainHeadRpcError::InvalidBlock); - return Ok(()) - }, - Err(error) => { - let _ = sink.send(&ChainHeadEvent::::Error(ErrorEvent { - error: error.to_string(), - })); - return Ok(()) + return Err(ChainHeadRpcError::InvalidBlock.into()) }, + Err(_) => return Err(ChainHeadRpcError::InvalidBlock.into()), }; - let fut = async move { - let _block_guard = block_guard; - let event = match client.block(hash) { - Ok(Some(signed_block)) => { - let extrinsics = signed_block.block.extrinsics(); - let result = format!("0x{:?}", HexDisplay::from(&extrinsics.encode())); - ChainHeadEvent::Done(ChainHeadResult { result }) - }, - Ok(None) => { - // The block's body was pruned. This subscription ID has become invalid. - debug!( - target: LOG_TARGET, - "[body][id={:?}] Stopping subscription because hash={:?} was pruned", - &follow_subscription, - hash - ); - subscriptions.remove_subscription(&follow_subscription); - ChainHeadEvent::::Disjoint - }, - Err(error) => ChainHeadEvent::Error(ErrorEvent { error: error.to_string() }), - }; - let _ = sink.send(&event); + let event = match self.client.block(hash) { + Ok(Some(signed_block)) => { + let extrinsics = signed_block + .block + .extrinsics() + .iter() + .map(|extrinsic| hex_string(&extrinsic.encode())) + .collect(); + FollowEvent::::OperationBodyDone(OperationBodyDone { + operation_id: block_guard.operation_id(), + value: extrinsics, + }) + }, + Ok(None) => { + // The block's body was pruned. This subscription ID has become invalid. 
+ debug!( + target: LOG_TARGET, + "[body][id={:?}] Stopping subscription because hash={:?} was pruned", + &follow_subscription, + hash + ); + self.subscriptions.remove_subscription(&follow_subscription); + return Err(ChainHeadRpcError::InvalidBlock.into()) + }, + Err(error) => FollowEvent::::OperationError(OperationError { + operation_id: block_guard.operation_id(), + error: error.to_string(), + }), }; - self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); - Ok(()) + let _ = block_guard.response_sender().unbounded_send(event); + Ok(MethodResponse::Started(MethodResponseStarted { + operation_id: block_guard.operation_id(), + discarded_items: None, + })) } fn chain_head_unstable_header( @@ -256,12 +285,10 @@ where follow_subscription: String, hash: Block::Hash, ) -> RpcResult> { - let _block_guard = match self.subscriptions.lock_block(&follow_subscription, hash) { + let _block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { Ok(block) => block, - Err(SubscriptionManagementError::SubscriptionAbsent) => { - // Invalid invalid subscription ID. - return Ok(None) - }, + Err(SubscriptionManagementError::SubscriptionAbsent) | + Err(SubscriptionManagementError::ExceededLimits) => return Ok(None), Err(SubscriptionManagementError::BlockHashAbsent) => { // Block is not part of the subscription. 
return Err(ChainHeadRpcError::InvalidBlock.into()) @@ -271,7 +298,7 @@ where self.client .header(hash) - .map(|opt_header| opt_header.map(|h| format!("0x{:?}", HexDisplay::from(&h.encode())))) + .map(|opt_header| opt_header.map(|h| hex_string(&h.encode()))) .map_err(ChainHeadRpcError::FetchBlockHeader) .map_err(Into::into) } @@ -282,164 +309,120 @@ where fn chain_head_unstable_storage( &self, - mut sink: SubscriptionSink, follow_subscription: String, hash: Block::Hash, - key: String, - child_key: Option, - _network_config: Option, - ) -> SubscriptionResult { - let key = StorageKey(parse_hex_param(&mut sink, key)?); + items: Vec>, + child_trie: Option, + ) -> RpcResult { + // Gain control over parameter parsing and returned error. + let items = items + .into_iter() + .map(|query| { + if query.query_type == StorageQueryType::ClosestDescendantMerkleValue { + // Note: remove this once all types are implemented. + return Err(ChainHeadRpcError::InvalidParam( + "Storage query type not supported".into(), + )) + } + + Ok(StorageQuery { + key: StorageKey(parse_hex_param(query.key)?), + query_type: query.query_type, + }) + }) + .collect::, _>>()?; - let child_key = child_key - .map(|child_key| parse_hex_param(&mut sink, child_key)) + let child_trie = child_trie + .map(|child_trie| parse_hex_param(child_trie)) .transpose()? .map(ChildInfo::new_default_from_vec); - let client = self.client.clone(); - let subscriptions = self.subscriptions.clone(); + let block_guard = + match self.subscriptions.lock_block(&follow_subscription, hash, items.len()) { + Ok(block) => block, + Err(SubscriptionManagementError::SubscriptionAbsent) | + Err(SubscriptionManagementError::ExceededLimits) => return Ok(MethodResponse::LimitReached), + Err(SubscriptionManagementError::BlockHashAbsent) => { + // Block is not part of the subscription. 
+ return Err(ChainHeadRpcError::InvalidBlock.into()) + }, + Err(_) => return Err(ChainHeadRpcError::InvalidBlock.into()), + }; - let block_guard = match subscriptions.lock_block(&follow_subscription, hash) { - Ok(block) => block, - Err(SubscriptionManagementError::SubscriptionAbsent) => { - // Invalid invalid subscription ID. - let _ = sink.send(&ChainHeadEvent::::Disjoint); - return Ok(()) - }, - Err(SubscriptionManagementError::BlockHashAbsent) => { - // Block is not part of the subscription. - let _ = sink.reject(ChainHeadRpcError::InvalidBlock); - return Ok(()) - }, - Err(error) => { - let _ = sink.send(&ChainHeadEvent::::Error(ErrorEvent { - error: error.to_string(), - })); - return Ok(()) - }, - }; + let storage_client = ChainHeadStorage::::new(self.client.clone()); + let operation_id = block_guard.operation_id(); - let fut = async move { - let _block_guard = block_guard; - // The child key is provided, use the key to query the child trie. - if let Some(child_key) = child_key { - // The child key must not be prefixed with ":child_storage:" nor - // ":child_storage:default:". - if well_known_keys::is_default_child_storage_key(child_key.storage_key()) || - well_known_keys::is_child_storage_key(child_key.storage_key()) - { - let _ = sink - .send(&ChainHeadEvent::Done(ChainHeadResult { result: None:: })); - return - } + // The number of operations we are allowed to execute. 
+ let num_operations = block_guard.num_reserved(); + let discarded = items.len().saturating_sub(num_operations); + let mut items = items; + items.truncate(num_operations); - let res = client - .child_storage(hash, &child_key, &key) - .map(|result| { - let result = - result.map(|storage| format!("0x{:?}", HexDisplay::from(&storage.0))); - ChainHeadEvent::Done(ChainHeadResult { result }) - }) - .unwrap_or_else(|error| { - ChainHeadEvent::Error(ErrorEvent { error: error.to_string() }) - }); - let _ = sink.send(&res); - return - } - - // The main key must not be prefixed with b":child_storage:" nor - // b":child_storage:default:". - if well_known_keys::is_default_child_storage_key(&key.0) || - well_known_keys::is_child_storage_key(&key.0) - { - let _ = - sink.send(&ChainHeadEvent::Done(ChainHeadResult { result: None:: })); - return - } - - // Main root trie storage query. - let res = client - .storage(hash, &key) - .map(|result| { - let result = - result.map(|storage| format!("0x{:?}", HexDisplay::from(&storage.0))); - ChainHeadEvent::Done(ChainHeadResult { result }) - }) - .unwrap_or_else(|error| { - ChainHeadEvent::Error(ErrorEvent { error: error.to_string() }) - }); - let _ = sink.send(&res); + let fut = async move { + storage_client.generate_events(block_guard, hash, items, child_trie); }; - self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); - Ok(()) + self.executor + .spawn_blocking("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + Ok(MethodResponse::Started(MethodResponseStarted { + operation_id, + discarded_items: Some(discarded), + })) } fn chain_head_unstable_call( &self, - mut sink: SubscriptionSink, follow_subscription: String, hash: Block::Hash, function: String, call_parameters: String, - _network_config: Option, - ) -> SubscriptionResult { - let call_parameters = Bytes::from(parse_hex_param(&mut sink, call_parameters)?); - - let client = self.client.clone(); - let subscriptions = self.subscriptions.clone(); + ) -> 
RpcResult { + let call_parameters = Bytes::from(parse_hex_param(call_parameters)?); - let block_guard = match subscriptions.lock_block(&follow_subscription, hash) { + let block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { Ok(block) => block, - Err(SubscriptionManagementError::SubscriptionAbsent) => { + Err(SubscriptionManagementError::SubscriptionAbsent) | + Err(SubscriptionManagementError::ExceededLimits) => { // Invalid invalid subscription ID. - let _ = sink.send(&ChainHeadEvent::::Disjoint); - return Ok(()) + return Ok(MethodResponse::LimitReached) }, Err(SubscriptionManagementError::BlockHashAbsent) => { // Block is not part of the subscription. - let _ = sink.reject(ChainHeadRpcError::InvalidBlock); - return Ok(()) - }, - Err(error) => { - let _ = sink.send(&ChainHeadEvent::::Error(ErrorEvent { - error: error.to_string(), - })); - return Ok(()) + return Err(ChainHeadRpcError::InvalidBlock.into()) }, + Err(_) => return Err(ChainHeadRpcError::InvalidBlock.into()), }; - let fut = async move { - // Reject subscription if runtime_updates is false. - if !block_guard.has_runtime_updates() { - let _ = sink.reject(ChainHeadRpcError::InvalidParam( - "The runtime updates flag must be set".into(), - )); - return - } - - let res = client - .executor() - .call( - hash, - &function, - &call_parameters, - client.execution_extensions().strategies().other, - CallContext::Offchain, - ) - .map(|result| { - let result = format!("0x{:?}", HexDisplay::from(&result)); - ChainHeadEvent::Done(ChainHeadResult { result }) - }) - .unwrap_or_else(|error| { - ChainHeadEvent::Error(ErrorEvent { error: error.to_string() }) - }); + // Reject subscription if with_runtime is false. 
+ if !block_guard.has_runtime() { + return Err(ChainHeadRpcError::InvalidParam( + "The runtime updates flag must be set".to_string(), + ) + .into()) + } - let _ = sink.send(&res); - }; + let event = self + .client + .executor() + .call(hash, &function, &call_parameters, CallContext::Offchain) + .map(|result| { + FollowEvent::::OperationCallDone(OperationCallDone { + operation_id: block_guard.operation_id(), + output: hex_string(&result), + }) + }) + .unwrap_or_else(|error| { + FollowEvent::::OperationError(OperationError { + operation_id: block_guard.operation_id(), + error: error.to_string(), + }) + }); - self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); - Ok(()) + let _ = block_guard.response_sender().unbounded_send(event); + Ok(MethodResponse::Started(MethodResponseStarted { + operation_id: block_guard.operation_id(), + discarded_items: None, + })) } fn chain_head_unstable_unpin( diff --git a/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs index f496f07a37b18..0fa995ce73a09 100644 --- a/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs +++ b/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs @@ -24,7 +24,7 @@ use crate::chain_head::{ BestBlockChanged, Finalized, FollowEvent, Initialized, NewBlock, RuntimeEvent, RuntimeVersionEvent, }, - subscription::{SubscriptionManagement, SubscriptionManagementError}, + subscription::{InsertedSubscriptionData, SubscriptionManagement, SubscriptionManagementError}, }; use futures::{ channel::oneshot, @@ -52,7 +52,7 @@ pub struct ChainHeadFollower, Block: BlockT, Client> { /// Subscriptions handle. sub_handle: Arc>, /// Subscription was started with the runtime updates flag. - runtime_updates: bool, + with_runtime: bool, /// Subscription ID. sub_id: String, /// The best reported block by this subscription. 
@@ -65,10 +65,10 @@ impl, Block: BlockT, Client> ChainHeadFollower, backend: Arc, sub_handle: Arc>, - runtime_updates: bool, + with_runtime: bool, sub_id: String, ) -> Self { - Self { client, backend, sub_handle, runtime_updates, sub_id, best_block_cache: None } + Self { client, backend, sub_handle, with_runtime, sub_id, best_block_cache: None } } } @@ -80,6 +80,8 @@ enum NotificationType { NewBlock(BlockImportNotification), /// The finalized block notification obtained from `finality_notification_stream`. Finalized(FinalityNotification), + /// The response of `chainHead` method calls. + MethodResponse(FollowEvent), } /// The initial blocks that should be reported or ignored by the chainHead. @@ -144,7 +146,7 @@ where parent: Option, ) -> Option { // No runtime versions should be reported. - if !self.runtime_updates { + if !self.with_runtime { return None } @@ -228,7 +230,7 @@ where let initialized_event = FollowEvent::Initialized(Initialized { finalized_block_hash, finalized_block_runtime, - runtime_updates: self.runtime_updates, + with_runtime: self.with_runtime, }); let mut finalized_block_descendants = Vec::with_capacity(initial_blocks.len() + 1); @@ -243,7 +245,7 @@ where block_hash: child, parent_block_hash: parent, new_runtime, - runtime_updates: self.runtime_updates, + with_runtime: self.with_runtime, }); finalized_block_descendants.push(event); @@ -274,7 +276,7 @@ where block_hash, parent_block_hash, new_runtime, - runtime_updates: self.runtime_updates, + with_runtime: self.with_runtime, }); if !is_best_block { @@ -339,9 +341,7 @@ where let mut events = Vec::new(); // Nothing to be done if no finalized hashes are provided. - let Some(first_hash) = finalized_block_hashes.get(0) else { - return Ok(Default::default()) - }; + let Some(first_hash) = finalized_block_hashes.get(0) else { return Ok(Default::default()) }; // Find the parent header. let Some(first_header) = self.client.header(*first_hash)? 
else { @@ -517,6 +517,7 @@ where self.handle_import_blocks(notification, &startup_point), NotificationType::Finalized(notification) => self.handle_finalized_blocks(notification, &mut to_ignore, &startup_point), + NotificationType::MethodResponse(notification) => Ok(vec![notification]), }; let events = match events { @@ -574,7 +575,7 @@ where pub async fn generate_events( &mut self, mut sink: SubscriptionSink, - rx_stop: oneshot::Receiver<()>, + sub_data: InsertedSubscriptionData, ) { // Register for the new block and finalized notifications. let stream_import = self @@ -587,6 +588,10 @@ where .finality_notification_stream() .map(|notification| NotificationType::Finalized(notification)); + let stream_responses = sub_data + .response_receiver + .map(|response| NotificationType::MethodResponse(response)); + let startup_point = StartupPoint::from(self.client.info()); let (initial_events, pruned_forks) = match self.generate_init_events(&startup_point) { Ok(blocks) => blocks, @@ -604,9 +609,10 @@ where let initial = NotificationType::InitialEvents(initial_events); let merged = tokio_stream::StreamExt::merge(stream_import, stream_finalized); + let merged = tokio_stream::StreamExt::merge(merged, stream_responses); let stream = stream::once(futures::future::ready(initial)).chain(merged); - self.submit_events(&startup_point, stream.boxed(), pruned_forks, sink, rx_stop) + self.submit_events(&startup_point, stream.boxed(), pruned_forks, sink, sub_data.rx_stop) .await; } } diff --git a/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs b/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs new file mode 100644 index 0000000000000..393e4489c8c07 --- /dev/null +++ b/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs @@ -0,0 +1,265 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Implementation of the `chainHead_storage` method. + +use std::{marker::PhantomData, sync::Arc}; + +use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; +use sc_utils::mpsc::TracingUnboundedSender; +use sp_api::BlockT; +use sp_core::storage::well_known_keys; + +use crate::chain_head::event::OperationStorageItems; + +use super::{ + event::{ + OperationError, OperationId, StorageQuery, StorageQueryType, StorageResult, + StorageResultType, + }, + hex_string, + subscription::BlockGuard, + FollowEvent, +}; + +/// The maximum number of items the `chainHead_storage` can return +/// before pagination is required. +const MAX_ITER_ITEMS: usize = 10; + +/// The query type of an iteration. +enum IterQueryType { + /// Iterating over (key, value) pairs. + Value, + /// Iterating over (key, hash) pairs. + Hash, +} + +/// Generates the events of the `chainHead_storage` method. +pub struct ChainHeadStorage { + /// Substrate client. + client: Arc, + _phantom: PhantomData<(Block, BE)>, +} + +impl ChainHeadStorage { + /// Constructs a new [`ChainHeadStorage`]. + pub fn new(client: Arc) -> Self { + Self { client, _phantom: PhantomData } + } +} + +/// Checks if the provided key (main or child key) is valid +/// for queries.
+/// +/// Keys that are identical to `:child_storage:` or `:child_storage:default:` +/// are not queryable. +fn is_key_queryable(key: &[u8]) -> bool { + !well_known_keys::is_default_child_storage_key(key) && + !well_known_keys::is_child_storage_key(key) +} + +/// The result of making a query call. +type QueryResult = Result, String>; + +/// The result of iterating over keys. +type QueryIterResult = Result, String>; + +impl ChainHeadStorage +where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: StorageProvider + 'static, +{ + /// Fetch the value from storage. + fn query_storage_value( + &self, + hash: Block::Hash, + key: &StorageKey, + child_key: Option<&ChildInfo>, + ) -> QueryResult { + let result = if let Some(child_key) = child_key { + self.client.child_storage(hash, child_key, key) + } else { + self.client.storage(hash, key) + }; + + result + .map(|opt| { + QueryResult::Ok(opt.map(|storage_data| StorageResult { + key: hex_string(&key.0), + result: StorageResultType::Value(hex_string(&storage_data.0)), + })) + }) + .unwrap_or_else(|error| QueryResult::Err(error.to_string())) + } + + /// Fetch the hash of a value from storage. + fn query_storage_hash( + &self, + hash: Block::Hash, + key: &StorageKey, + child_key: Option<&ChildInfo>, + ) -> QueryResult { + let result = if let Some(child_key) = child_key { + self.client.child_storage_hash(hash, child_key, key) + } else { + self.client.storage_hash(hash, key) + }; + + result + .map(|opt| { + QueryResult::Ok(opt.map(|storage_data| StorageResult { + key: hex_string(&key.0), + result: StorageResultType::Hash(hex_string(&storage_data.as_ref())), + })) + }) + .unwrap_or_else(|error| QueryResult::Err(error.to_string())) + } + + /// Handle iterating over (key, value) or (key, hash) pairs. 
+ fn query_storage_iter( + &self, + hash: Block::Hash, + key: &StorageKey, + child_key: Option<&ChildInfo>, + ty: IterQueryType, + ) -> QueryIterResult { + let keys_iter = if let Some(child_key) = child_key { + self.client.child_storage_keys(hash, child_key.to_owned(), Some(key), None) + } else { + self.client.storage_keys(hash, Some(key), None) + } + .map_err(|error| error.to_string())?; + + let mut ret = Vec::with_capacity(MAX_ITER_ITEMS); + let mut keys_iter = keys_iter.take(MAX_ITER_ITEMS); + while let Some(key) = keys_iter.next() { + let result = match ty { + IterQueryType::Value => self.query_storage_value(hash, &key, child_key), + IterQueryType::Hash => self.query_storage_hash(hash, &key, child_key), + }?; + + if let Some(result) = result { + ret.push(result); + } + } + + QueryIterResult::Ok(ret) + } + + /// Generate the block events for the `chainHead_storage` method. + pub fn generate_events( + &self, + block_guard: BlockGuard, + hash: Block::Hash, + items: Vec>, + child_key: Option, + ) { + /// Build and send the opaque error back to the `chainHead_follow` method. 
+ fn send_error( + sender: &TracingUnboundedSender>, + operation_id: String, + error: String, + ) { + let _ = + sender.unbounded_send(FollowEvent::::OperationError(OperationError { + operation_id, + error, + })); + } + + let sender = block_guard.response_sender(); + + if let Some(child_key) = child_key.as_ref() { + if !is_key_queryable(child_key.storage_key()) { + let _ = sender.unbounded_send(FollowEvent::::OperationStorageDone( + OperationId { operation_id: block_guard.operation_id() }, + )); + return + } + } + + let mut storage_results = Vec::with_capacity(items.len()); + for item in items { + if !is_key_queryable(&item.key.0) { + continue + } + + match item.query_type { + StorageQueryType::Value => { + match self.query_storage_value(hash, &item.key, child_key.as_ref()) { + Ok(Some(value)) => storage_results.push(value), + Ok(None) => continue, + Err(error) => { + send_error::(&sender, block_guard.operation_id(), error); + return + }, + } + }, + StorageQueryType::Hash => + match self.query_storage_hash(hash, &item.key, child_key.as_ref()) { + Ok(Some(value)) => storage_results.push(value), + Ok(None) => continue, + Err(error) => { + send_error::(&sender, block_guard.operation_id(), error); + return + }, + }, + StorageQueryType::DescendantsValues => match self.query_storage_iter( + hash, + &item.key, + child_key.as_ref(), + IterQueryType::Value, + ) { + Ok(values) => storage_results.extend(values), + Err(error) => { + send_error::(&sender, block_guard.operation_id(), error); + return + }, + }, + StorageQueryType::DescendantsHashes => match self.query_storage_iter( + hash, + &item.key, + child_key.as_ref(), + IterQueryType::Hash, + ) { + Ok(values) => storage_results.extend(values), + Err(error) => { + send_error::(&sender, block_guard.operation_id(), error); + return + }, + }, + _ => continue, + }; + } + + if !storage_results.is_empty() { + let _ = sender.unbounded_send(FollowEvent::::OperationStorageItems( + OperationStorageItems { + operation_id: 
block_guard.operation_id(), + items: storage_results, + }, + )); + } + + let _ = + sender.unbounded_send(FollowEvent::::OperationStorageDone(OperationId { + operation_id: block_guard.operation_id(), + })); + } +} diff --git a/client/rpc-spec-v2/src/chain_head/error.rs b/client/rpc-spec-v2/src/chain_head/error.rs index 3f31d985de0ff..3b2edb2b00c8c 100644 --- a/client/rpc-spec-v2/src/chain_head/error.rs +++ b/client/rpc-spec-v2/src/chain_head/error.rs @@ -39,6 +39,9 @@ pub enum Error { /// Invalid subscription ID provided by the RPC server. #[error("Invalid subscription ID")] InvalidSubscriptionID, + /// Wait-for-continue event not generated. + #[error("Wait for continue event was not generated for the subscription")] + InvalidContinue, } // Base code for all `chainHead` errors. @@ -51,6 +54,8 @@ const FETCH_BLOCK_HEADER_ERROR: i32 = BASE_ERROR + 2; const INVALID_PARAM_ERROR: i32 = BASE_ERROR + 3; /// Invalid subscription ID. const INVALID_SUB_ID: i32 = BASE_ERROR + 4; +/// Wait-for-continue event not generated. 
+const INVALID_CONTINUE: i32 = BASE_ERROR + 5; impl From for ErrorObject<'static> { fn from(e: Error) -> Self { @@ -62,6 +67,7 @@ impl From for ErrorObject<'static> { ErrorObject::owned(FETCH_BLOCK_HEADER_ERROR, msg, None::<()>), Error::InvalidParam(_) => ErrorObject::owned(INVALID_PARAM_ERROR, msg, None::<()>), Error::InvalidSubscriptionID => ErrorObject::owned(INVALID_SUB_ID, msg, None::<()>), + Error::InvalidContinue => ErrorObject::owned(INVALID_CONTINUE, msg, None::<()>), } .into() } diff --git a/client/rpc-spec-v2/src/chain_head/event.rs b/client/rpc-spec-v2/src/chain_head/event.rs index 2e070c7ce6ae7..65bc8b247c880 100644 --- a/client/rpc-spec-v2/src/chain_head/event.rs +++ b/client/rpc-spec-v2/src/chain_head/event.rs @@ -21,27 +21,6 @@ use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer}; use sp_api::ApiError; use sp_version::RuntimeVersion; -use std::num::NonZeroUsize; - -/// The network config parameter is used when a function -/// needs to request the information from its peers. -/// -/// These values can be tweaked depending on the urgency of the JSON-RPC function call. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct NetworkConfig { - /// The total number of peers from which the information is requested. - total_attempts: u64, - /// The maximum number of requests to perform in parallel. - /// - /// # Note - /// - /// A zero value is illegal. - max_parallel: NonZeroUsize, - /// The time, in milliseconds, after which a single requests towards one peer - /// is considered unsuccessful. - timeout_ms: u64, -} /// The operation could not be processed due to an error. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -64,7 +43,7 @@ pub struct RuntimeVersionEvent { } /// The runtime event generated if the `follow` subscription -/// has set the `runtime_updates` flag. +/// has set the `with_runtime` flag. 
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(tag = "type")] @@ -88,7 +67,7 @@ impl From for RuntimeEvent { /// This is the first event generated by the `follow` subscription /// and is submitted only once. /// -/// If the `runtime_updates` flag is set, then this event contains +/// If the `with_runtime` flag is set, then this event contains /// the `RuntimeEvent`, otherwise the `RuntimeEvent` is not present. #[derive(Debug, Clone, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] @@ -99,23 +78,23 @@ pub struct Initialized { /// /// # Note /// - /// This is present only if the `runtime_updates` flag is set for + /// This is present only if the `with_runtime` flag is set for /// the `follow` subscription. pub finalized_block_runtime: Option, /// Privately keep track if the `finalized_block_runtime` should be /// serialized. #[serde(default)] - pub(crate) runtime_updates: bool, + pub(crate) with_runtime: bool, } impl Serialize for Initialized { /// Custom serialize implementation to include the `RuntimeEvent` depending - /// on the internal `runtime_updates` flag. + /// on the internal `with_runtime` flag. fn serialize(&self, serializer: S) -> Result where S: Serializer, { - if self.runtime_updates { + if self.with_runtime { let mut state = serializer.serialize_struct("Initialized", 2)?; state.serialize_field("finalizedBlockHash", &self.finalized_block_hash)?; state.serialize_field("finalizedBlockRuntime", &self.finalized_block_runtime)?; @@ -140,23 +119,23 @@ pub struct NewBlock { /// /// # Note /// - /// This is present only if the `runtime_updates` flag is set for + /// This is present only if the `with_runtime` flag is set for /// the `follow` subscription. pub new_runtime: Option, /// Privately keep track if the `finalized_block_runtime` should be /// serialized. 
#[serde(default)] - pub(crate) runtime_updates: bool, + pub(crate) with_runtime: bool, } impl Serialize for NewBlock { /// Custom serialize implementation to include the `RuntimeEvent` depending - /// on the internal `runtime_updates` flag. + /// on the internal `with_runtime` flag. fn serialize(&self, serializer: S) -> Result where S: Serializer, { - if self.runtime_updates { + if self.with_runtime { let mut state = serializer.serialize_struct("NewBlock", 3)?; state.serialize_field("blockHash", &self.block_hash)?; state.serialize_field("parentBlockHash", &self.parent_block_hash)?; @@ -189,16 +168,76 @@ pub struct Finalized { pub pruned_block_hashes: Vec, } +/// Indicate the operation id of the event. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct OperationId { + /// The operation id of the event. + pub operation_id: String, +} + +/// The response of the `chainHead_body` method. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct OperationBodyDone { + /// The operation id of the event. + pub operation_id: String, + /// Array of hexadecimal-encoded scale-encoded extrinsics found in the block. + pub value: Vec, +} + +/// The response of the `chainHead_call` method. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct OperationCallDone { + /// The operation id of the event. + pub operation_id: String, + /// Hexadecimal-encoded output of the runtime function call. + pub output: String, +} + +/// The response of the `chainHead_call` method. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct OperationStorageItems { + /// The operation id of the event. + pub operation_id: String, + /// The resulting items. + pub items: Vec, +} + +/// Indicate a problem during the operation. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct OperationError { + /// The operation id of the event. + pub operation_id: String, + /// The reason of the error. + pub error: String, +} + /// The event generated by the `follow` method. /// -/// The events are generated in the following order: -/// 1. Initialized - generated only once to signal the -/// latest finalized block +/// The block events are generated in the following order: +/// 1. Initialized - generated only once to signal the latest finalized block /// 2. NewBlock - a new block was added. -/// 3. BestBlockChanged - indicate that the best block -/// is now the one from this event. The block was -/// announced priorly with the `NewBlock` event. +/// 3. BestBlockChanged - indicate that the best block is now the one from this event. The block was +/// announced priorly with the `NewBlock` event. /// 4. Finalized - State the finalized and pruned blocks. +/// +/// The following events are related to operations: +/// - OperationBodyDone: The response of the `chainHead_body` +/// - OperationCallDone: The response of the `chainHead_call` +/// - OperationStorageItems: Items produced by the `chainHead_storage` +/// - OperationWaitingForContinue: Generated after OperationStorageItems and requires the user to +/// call `chainHead_continue` +/// - OperationStorageDone: The `chainHead_storage` method has produced all the results +/// - OperationInaccessible: The server was unable to provide the result, retries might succeed in +/// the future +/// - OperationError: The server encountered an error, retries will not succeed +/// +/// The stop event indicates that the JSON-RPC server was unable to provide a consistent list of +/// the blocks at the head of the chain.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(tag = "event")] @@ -213,34 +252,101 @@ pub enum FollowEvent { BestBlockChanged(BestBlockChanged), /// A list of finalized and pruned blocks. Finalized(Finalized), + /// The response of the `chainHead_body` method. + OperationBodyDone(OperationBodyDone), + /// The response of the `chainHead_call` method. + OperationCallDone(OperationCallDone), + /// Yield one or more items found in the storage. + OperationStorageItems(OperationStorageItems), + /// Ask the user to call `chainHead_continue` to produce more events + /// regarding the operation id. + OperationWaitingForContinue(OperationId), + /// The responses of the `chainHead_storage` method have been produced. + OperationStorageDone(OperationId), + /// The RPC server was unable to provide the response of the following operation id. + /// + /// Repeating the same operation in the future might succeed. + OperationInaccessible(OperationId), + /// The RPC server encountered an error while processing an operation id. + /// + /// Repeating the same operation in the future will not succeed. + OperationError(OperationError), /// The subscription is dropped and no further events /// will be generated. Stop, } -/// The result of a chain head method. +/// The storage item received as parameter. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct ChainHeadResult { - /// Result of the method. - pub result: T, +pub struct StorageQuery { + /// The provided key. + pub key: Key, + /// The type of the storage query. + #[serde(rename = "type")] + pub query_type: StorageQueryType, } -/// The event generated by the body / call / storage methods. +/// The type of the storage query. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -#[serde(tag = "event")] -pub enum ChainHeadEvent { -/// The request completed successfully.
- Done(ChainHeadResult), - /// The resources requested are inaccessible. - /// - /// Resubmitting the request later might succeed. - Inaccessible(ErrorEvent), - /// An error occurred. This is definitive. - Error(ErrorEvent), - /// The provided subscription ID is stale or invalid. - Disjoint, +pub enum StorageQueryType { + /// Fetch the value of the provided key. + Value, + /// Fetch the hash of the value of the provided key. + Hash, + /// Fetch the closest descendant merkle value. + ClosestDescendantMerkleValue, + /// Fetch the values of all descendants of the provided key. + DescendantsValues, + /// Fetch the hashes of the values of all descendants of the provided key. + DescendantsHashes, +} + +/// The storage result. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct StorageResult { + /// The hex-encoded key of the result. + pub key: String, + /// The result of the query. + #[serde(flatten)] + pub result: StorageResultType, +} + +/// The type of the storage query. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum StorageResultType { + /// Fetch the value of the provided key. + Value(String), + /// Fetch the hash of the value of the provided key. + Hash(String), + /// Fetch the closest descendant merkle value. + ClosestDescendantMerkleValue(String), +} + +/// The method response of `chainHead_body`, `chainHead_call` and `chainHead_storage`. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(tag = "result")] +pub enum MethodResponse { + /// The method has started. + Started(MethodResponseStarted), + /// The RPC server cannot handle the request at the moment. + LimitReached, +} + +/// The `started` result of a method. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct MethodResponseStarted { + /// The operation id of the response.
+ pub operation_id: String, + /// The number of items from the back of the `chainHead_storage` that have been discarded. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub discarded_items: Option, } #[cfg(test)] @@ -253,7 +359,7 @@ mod tests { let event: FollowEvent = FollowEvent::Initialized(Initialized { finalized_block_hash: "0x1".into(), finalized_block_runtime: None, - runtime_updates: false, + with_runtime: false, }); let ser = serde_json::to_string(&event).unwrap(); @@ -278,7 +384,7 @@ mod tests { let mut initialized = Initialized { finalized_block_hash: "0x1".into(), finalized_block_runtime: Some(runtime_event), - runtime_updates: true, + with_runtime: true, }; let event: FollowEvent = FollowEvent::Initialized(initialized.clone()); @@ -291,8 +397,8 @@ mod tests { assert_eq!(ser, exp); let event_dec: FollowEvent = serde_json::from_str(exp).unwrap(); - // The `runtime_updates` field is used for serialization purposes. - initialized.runtime_updates = false; + // The `with_runtime` field is used for serialization purposes. + initialized.with_runtime = false; assert!(matches!( event_dec, FollowEvent::Initialized(ref dec) if dec == &initialized )); @@ -305,7 +411,7 @@ mod tests { block_hash: "0x1".into(), parent_block_hash: "0x2".into(), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); let ser = serde_json::to_string(&event).unwrap(); @@ -331,7 +437,7 @@ mod tests { block_hash: "0x1".into(), parent_block_hash: "0x2".into(), new_runtime: Some(runtime_event), - runtime_updates: true, + with_runtime: true, }; let event: FollowEvent = FollowEvent::NewBlock(new_block.clone()); @@ -345,8 +451,8 @@ mod tests { assert_eq!(ser, exp); let event_dec: FollowEvent = serde_json::from_str(exp).unwrap(); - // The `runtime_updates` field is used for serialization purposes. - new_block.runtime_updates = false; + // The `with_runtime` field is used for serialization purposes. 
+ new_block.with_runtime = false; assert!(matches!( event_dec, FollowEvent::NewBlock(ref dec) if dec == &new_block )); @@ -356,7 +462,7 @@ mod tests { block_hash: "0x1".into(), parent_block_hash: "0x2".into(), new_runtime: None, - runtime_updates: true, + with_runtime: true, }; let event: FollowEvent = FollowEvent::NewBlock(new_block.clone()); @@ -364,7 +470,7 @@ mod tests { let exp = r#"{"event":"newBlock","blockHash":"0x1","parentBlockHash":"0x2","newRuntime":null}"#; assert_eq!(ser, exp); - new_block.runtime_updates = false; + new_block.with_runtime = false; let event_dec: FollowEvent = serde_json::from_str(exp).unwrap(); assert!(matches!( event_dec, FollowEvent::NewBlock(ref dec) if dec == &new_block @@ -401,11 +507,29 @@ mod tests { } #[test] - fn follow_stop_event() { - let event: FollowEvent = FollowEvent::Stop; + fn follow_op_body_event() { + let event: FollowEvent = FollowEvent::OperationBodyDone(OperationBodyDone { + operation_id: "123".into(), + value: vec!["0x1".into()], + }); let ser = serde_json::to_string(&event).unwrap(); - let exp = r#"{"event":"stop"}"#; + let exp = r#"{"event":"operationBodyDone","operationId":"123","value":["0x1"]}"#; + assert_eq!(ser, exp); + + let event_dec: FollowEvent = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn follow_op_call_event() { + let event: FollowEvent = FollowEvent::OperationCallDone(OperationCallDone { + operation_id: "123".into(), + output: "0x1".into(), + }); + + let ser = serde_json::to_string(&event).unwrap(); + let exp = r#"{"event":"operationCallDone","operationId":"123","output":"0x1"}"#; assert_eq!(ser, exp); let event_dec: FollowEvent = serde_json::from_str(exp).unwrap(); @@ -413,68 +537,218 @@ mod tests { } #[test] - fn chain_head_done_event() { - let event: ChainHeadEvent = - ChainHeadEvent::Done(ChainHeadResult { result: "A".into() }); + fn follow_op_storage_items_event() { + let event: FollowEvent = + 
FollowEvent::OperationStorageItems(OperationStorageItems { + operation_id: "123".into(), + items: vec![StorageResult { + key: "0x1".into(), + result: StorageResultType::Value("0x123".to_string()), + }], + }); let ser = serde_json::to_string(&event).unwrap(); - let exp = r#"{"event":"done","result":"A"}"#; + let exp = r#"{"event":"operationStorageItems","operationId":"123","items":[{"key":"0x1","value":"0x123"}]}"#; assert_eq!(ser, exp); - let event_dec: ChainHeadEvent = serde_json::from_str(exp).unwrap(); + let event_dec: FollowEvent = serde_json::from_str(exp).unwrap(); assert_eq!(event_dec, event); } #[test] - fn chain_head_inaccessible_event() { - let event: ChainHeadEvent = - ChainHeadEvent::Inaccessible(ErrorEvent { error: "A".into() }); + fn follow_op_wait_event() { + let event: FollowEvent = + FollowEvent::OperationWaitingForContinue(OperationId { operation_id: "123".into() }); let ser = serde_json::to_string(&event).unwrap(); - let exp = r#"{"event":"inaccessible","error":"A"}"#; + let exp = r#"{"event":"operationWaitingForContinue","operationId":"123"}"#; assert_eq!(ser, exp); - let event_dec: ChainHeadEvent = serde_json::from_str(exp).unwrap(); + let event_dec: FollowEvent = serde_json::from_str(exp).unwrap(); assert_eq!(event_dec, event); } #[test] - fn chain_head_error_event() { - let event: ChainHeadEvent = ChainHeadEvent::Error(ErrorEvent { error: "A".into() }); + fn follow_op_storage_done_event() { + let event: FollowEvent = + FollowEvent::OperationStorageDone(OperationId { operation_id: "123".into() }); let ser = serde_json::to_string(&event).unwrap(); - let exp = r#"{"event":"error","error":"A"}"#; + let exp = r#"{"event":"operationStorageDone","operationId":"123"}"#; assert_eq!(ser, exp); - let event_dec: ChainHeadEvent = serde_json::from_str(exp).unwrap(); + let event_dec: FollowEvent = serde_json::from_str(exp).unwrap(); assert_eq!(event_dec, event); } #[test] - fn chain_head_disjoint_event() { - let event: ChainHeadEvent = 
ChainHeadEvent::Disjoint; + fn follow_op_inaccessible_event() { + let event: FollowEvent = + FollowEvent::OperationInaccessible(OperationId { operation_id: "123".into() }); let ser = serde_json::to_string(&event).unwrap(); - let exp = r#"{"event":"disjoint"}"#; + let exp = r#"{"event":"operationInaccessible","operationId":"123"}"#; assert_eq!(ser, exp); - let event_dec: ChainHeadEvent = serde_json::from_str(exp).unwrap(); + let event_dec: FollowEvent = serde_json::from_str(exp).unwrap(); assert_eq!(event_dec, event); } #[test] - fn chain_head_network_config() { - let conf = NetworkConfig { - total_attempts: 1, - max_parallel: NonZeroUsize::new(2).expect("Non zero number; qed"), - timeout_ms: 3, - }; + fn follow_op_error_event() { + let event: FollowEvent = FollowEvent::OperationError(OperationError { + operation_id: "123".into(), + error: "reason".into(), + }); + + let ser = serde_json::to_string(&event).unwrap(); + let exp = r#"{"event":"operationError","operationId":"123","error":"reason"}"#; + assert_eq!(ser, exp); + + let event_dec: FollowEvent = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn follow_stop_event() { + let event: FollowEvent = FollowEvent::Stop; + + let ser = serde_json::to_string(&event).unwrap(); + let exp = r#"{"event":"stop"}"#; + assert_eq!(ser, exp); + + let event_dec: FollowEvent = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn method_response() { + // Response of `call` and `body` + let event = MethodResponse::Started(MethodResponseStarted { + operation_id: "123".into(), + discarded_items: None, + }); + + let ser = serde_json::to_string(&event).unwrap(); + let exp = r#"{"result":"started","operationId":"123"}"#; + assert_eq!(ser, exp); + + let event_dec: MethodResponse = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + + // Response of `storage` + let event = MethodResponse::Started(MethodResponseStarted { + operation_id: "123".into(), 
+ discarded_items: Some(1), + }); + + let ser = serde_json::to_string(&event).unwrap(); + let exp = r#"{"result":"started","operationId":"123","discardedItems":1}"#; + assert_eq!(ser, exp); + + let event_dec: MethodResponse = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + + // Limit reached. + let event = MethodResponse::LimitReached; + + let ser = serde_json::to_string(&event).unwrap(); + let exp = r#"{"result":"limitReached"}"#; + assert_eq!(ser, exp); + + let event_dec: MethodResponse = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } - let ser = serde_json::to_string(&conf).unwrap(); - let exp = r#"{"totalAttempts":1,"maxParallel":2,"timeoutMs":3}"#; + #[test] + fn chain_head_storage_query() { + // Item with Value. + let item = StorageQuery { key: "0x1", query_type: StorageQueryType::Value }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","type":"value"}"#; + assert_eq!(ser, exp); + // Decode + let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash. + let item = StorageQuery { key: "0x1", query_type: StorageQueryType::Hash }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","type":"hash"}"#; + assert_eq!(ser, exp); + // Decode + let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with DescendantsValues. + let item = StorageQuery { key: "0x1", query_type: StorageQueryType::DescendantsValues }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","type":"descendantsValues"}"#; assert_eq!(ser, exp); + // Decode + let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with DescendantsHashes. 
+ let item = StorageQuery { key: "0x1", query_type: StorageQueryType::DescendantsHashes }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","type":"descendantsHashes"}"#; + assert_eq!(ser, exp); + // Decode + let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Merkle. + let item = + StorageQuery { key: "0x1", query_type: StorageQueryType::ClosestDescendantMerkleValue }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","type":"closestDescendantMerkleValue"}"#; + assert_eq!(ser, exp); + // Decode + let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + } - let conf_dec: NetworkConfig = serde_json::from_str(exp).unwrap(); - assert_eq!(conf_dec, conf); + #[test] + fn chain_head_storage_result() { + // Item with Value. + let item = + StorageResult { key: "0x1".into(), result: StorageResultType::Value("res".into()) }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","value":"res"}"#; + assert_eq!(ser, exp); + // Decode + let dec: StorageResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash. + let item = + StorageResult { key: "0x1".into(), result: StorageResultType::Hash("res".into()) }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","hash":"res"}"#; + assert_eq!(ser, exp); + // Decode + let dec: StorageResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with DescendantsValues. 
+ let item = StorageResult { + key: "0x1".into(), + result: StorageResultType::ClosestDescendantMerkleValue("res".into()), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","closestDescendantMerkleValue":"res"}"#; + assert_eq!(ser, exp); + // Decode + let dec: StorageResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); } } diff --git a/client/rpc-spec-v2/src/chain_head/mod.rs b/client/rpc-spec-v2/src/chain_head/mod.rs index 1c489d323f195..1bd2288578025 100644 --- a/client/rpc-spec-v2/src/chain_head/mod.rs +++ b/client/rpc-spec-v2/src/chain_head/mod.rs @@ -33,11 +33,19 @@ pub mod error; pub mod event; mod chain_head_follow; +mod chain_head_storage; mod subscription; pub use api::ChainHeadApiServer; -pub use chain_head::ChainHead; +pub use chain_head::{ChainHead, ChainHeadConfig}; pub use event::{ - BestBlockChanged, ChainHeadEvent, ChainHeadResult, ErrorEvent, Finalized, FollowEvent, - Initialized, NetworkConfig, NewBlock, RuntimeEvent, RuntimeVersionEvent, + BestBlockChanged, ErrorEvent, Finalized, FollowEvent, Initialized, NewBlock, RuntimeEvent, + RuntimeVersionEvent, }; + +use sp_core::hexdisplay::{AsBytesRef, HexDisplay}; + +/// Util function to print the results of `chianHead` as hex string +pub(crate) fn hex_string(data: &Data) -> String { + format!("0x{:?}", HexDisplay::from(data)) +} diff --git a/client/rpc-spec-v2/src/chain_head/subscription/error.rs b/client/rpc-spec-v2/src/chain_head/subscription/error.rs index 443ee9fb87a25..38e8fd7384fcb 100644 --- a/client/rpc-spec-v2/src/chain_head/subscription/error.rs +++ b/client/rpc-spec-v2/src/chain_head/subscription/error.rs @@ -21,10 +21,10 @@ use sp_blockchain::Error; /// Subscription management error. #[derive(Debug, thiserror::Error)] pub enum SubscriptionManagementError { - /// The block cannot be pinned into memory because - /// the subscription has exceeded the maximum number - /// of blocks pinned. 
- #[error("Exceeded pinning limits")] + /// The subscription has exceeded the internal limits + /// regarding the number of pinned blocks in memory or + /// the number of ongoing operations. + #[error("Exceeded pinning or operation limits")] ExceededLimits, /// Error originated from the blockchain (client or backend). #[error("Blockchain error {0}")] diff --git a/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 8865daa83cba2..9f42be4a2f7f6 100644 --- a/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -18,6 +18,7 @@ use futures::channel::oneshot; use sc_client_api::Backend; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::Block as BlockT; use std::{ collections::{hash_map::Entry, HashMap}, @@ -25,7 +26,10 @@ use std::{ time::{Duration, Instant}, }; -use crate::chain_head::subscription::SubscriptionManagementError; +use crate::chain_head::{subscription::SubscriptionManagementError, FollowEvent}; + +/// The queue size after which the `sc_utils::mpsc::tracing_unbounded` would produce warnings. +const QUEUE_SIZE_WARNING: usize = 512; /// The state machine of a block of a single subscription ID. /// @@ -103,6 +107,62 @@ impl BlockStateMachine { } } +/// Limit the number of ongoing operations across methods. +struct LimitOperations { + /// Limit the number of ongoing operations for this subscription. + semaphore: Arc, +} + +impl LimitOperations { + /// Constructs a new [`LimitOperations`]. + fn new(max_operations: usize) -> Self { + LimitOperations { semaphore: Arc::new(tokio::sync::Semaphore::new(max_operations)) } + } + + /// Reserves capacity to execute at least one operation and at most the requested items. + /// + /// Dropping [`PermitOperations`] without executing an operation will release + /// the reserved capacity. 
+ /// + /// Returns nothing if there's no space available, else returns a permit + /// that guarantees that at least one operation can be executed. + fn reserve_at_most(&self, to_reserve: usize) -> Option { + let num_ops = std::cmp::min(self.semaphore.available_permits(), to_reserve); + + if num_ops == 0 { + return None + } + + let permits = Arc::clone(&self.semaphore) + .try_acquire_many_owned(num_ops.try_into().ok()?) + .ok()?; + + Some(PermitOperations { num_ops, _permit: permits }) + } +} + +/// Permits a number of operations to be executed. +/// +/// [`PermitOperations`] are returned by [`LimitOperations::reserve()`] and are used +/// to guarantee the RPC server can execute the number of operations. +/// +/// The number of reserved items are given back to the [`LimitOperations`] on drop. +struct PermitOperations { + /// The number of operations permitted (reserved). + num_ops: usize, + /// The permit for these operations. + _permit: tokio::sync::OwnedSemaphorePermit, +} + +impl PermitOperations { + /// Returns the number of reserved elements for this permit. + /// + /// This can be smaller than the number of items requested via [`LimitOperations::reserve()`]. + fn num_reserved(&self) -> usize { + self.num_ops + } +} + struct BlockState { /// The state machine of this block. state_machine: BlockStateMachine, @@ -112,10 +172,18 @@ struct BlockState { /// The state of a single subscription ID. struct SubscriptionState { - /// The `runtime_updates` parameter flag of the subscription. - runtime_updates: bool, + /// The `with_runtime` parameter flag of the subscription. + with_runtime: bool, /// Signals the "Stop" event. tx_stop: Option>, + /// The sender of message responses to the `chainHead_follow` events. + /// + /// This object is cloned between methods. + response_sender: TracingUnboundedSender>, + /// Limit the number of ongoing operations. + limits: LimitOperations, + /// The next operation ID. 
+ next_operation_id: usize, /// Track the block hashes available for this subscription. /// /// This implementation assumes: @@ -227,6 +295,20 @@ impl SubscriptionState { } timestamp } + + /// Generate the next operation ID for this subscription. + fn next_operation_id(&mut self) -> usize { + let op_id = self.next_operation_id; + self.next_operation_id = self.next_operation_id.wrapping_add(1); + op_id + } + + /// Reserves capacity to execute at least one operation and at most the requested items. + /// + /// For more details see [`PermitOperations`]. + fn reserve_at_most(&self, to_reserve: usize) -> Option { + self.limits.reserve_at_most(to_reserve) + } } /// Keeps a specific block pinned while the handle is alive. @@ -234,7 +316,10 @@ impl SubscriptionState { /// executing an RPC method call. pub struct BlockGuard> { hash: Block::Hash, - runtime_updates: bool, + with_runtime: bool, + response_sender: TracingUnboundedSender>, + operation_id: String, + permit_operations: PermitOperations, backend: Arc, } @@ -242,7 +327,7 @@ pub struct BlockGuard> { // testing. impl> std::fmt::Debug for BlockGuard { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "BlockGuard hash {:?} runtime_updates {:?}", self.hash, self.runtime_updates) + write!(f, "BlockGuard hash {:?} with_runtime {:?}", self.hash, self.with_runtime) } } @@ -250,19 +335,46 @@ impl> BlockGuard { /// Construct a new [`BlockGuard`] . fn new( hash: Block::Hash, - runtime_updates: bool, + with_runtime: bool, + response_sender: TracingUnboundedSender>, + operation_id: usize, + permit_operations: PermitOperations, backend: Arc, ) -> Result { backend .pin_block(hash) .map_err(|err| SubscriptionManagementError::Custom(err.to_string()))?; - Ok(Self { hash, runtime_updates, backend }) + Ok(Self { + hash, + with_runtime, + response_sender, + operation_id: operation_id.to_string(), + permit_operations, + backend, + }) + } + + /// The `with_runtime` flag of the subscription. 
+ pub fn has_runtime(&self) -> bool { + self.with_runtime + } + + /// Send message responses from the `chainHead` methods to `chainHead_follow`. + pub fn response_sender(&self) -> TracingUnboundedSender> { + self.response_sender.clone() + } + + /// The operation ID of this method. + pub fn operation_id(&self) -> String { + self.operation_id.clone() } - /// The `runtime_updates` flag of the subscription. - pub fn has_runtime_updates(&self) -> bool { - self.runtime_updates + /// Returns the number of reserved elements for this permit. + /// + /// This can be smaller than the number of items requested. + pub fn num_reserved(&self) -> usize { + self.permit_operations.num_reserved() } } @@ -272,6 +384,15 @@ impl> Drop for BlockGuard { } } +/// The data propagated back to the `chainHead_follow` method after +/// the subscription is successfully inserted. +pub struct InsertedSubscriptionData { + /// Signal that the subscription must stop. + pub rx_stop: oneshot::Receiver<()>, + /// Receive message responses from the `chainHead` methods. + pub response_receiver: TracingUnboundedReceiver>, +} + pub struct SubscriptionsInner> { /// Reference count the block hashes across all subscriptions. /// @@ -282,6 +403,8 @@ pub struct SubscriptionsInner> { global_max_pinned_blocks: usize, /// The maximum duration that a block is allowed to be pinned per subscription. local_max_pin_duration: Duration, + /// The maximum number of ongoing operations per subscription. + max_ongoing_operations: usize, /// Map the subscription ID to internal details of the subscription. subs: HashMap>, /// Backend pinning / unpinning blocks. 
@@ -295,12 +418,14 @@ impl> SubscriptionsInner { pub fn new( global_max_pinned_blocks: usize, local_max_pin_duration: Duration, + max_ongoing_operations: usize, backend: Arc, ) -> Self { SubscriptionsInner { global_blocks: Default::default(), global_max_pinned_blocks, local_max_pin_duration, + max_ongoing_operations, subs: Default::default(), backend, } @@ -310,17 +435,23 @@ impl> SubscriptionsInner { pub fn insert_subscription( &mut self, sub_id: String, - runtime_updates: bool, - ) -> Option> { + with_runtime: bool, + ) -> Option> { if let Entry::Vacant(entry) = self.subs.entry(sub_id) { let (tx_stop, rx_stop) = oneshot::channel(); + let (response_sender, response_receiver) = + tracing_unbounded("chain-head-method-responses", QUEUE_SIZE_WARNING); let state = SubscriptionState:: { - runtime_updates, + with_runtime, tx_stop: Some(tx_stop), + response_sender, + limits: LimitOperations::new(self.max_ongoing_operations), + next_operation_id: 0, blocks: Default::default(), }; entry.insert(state); - Some(rx_stop) + + Some(InsertedSubscriptionData { rx_stop, response_receiver }) } else { None } @@ -328,9 +459,7 @@ impl> SubscriptionsInner { /// Remove the subscription ID with associated pinned blocks. pub fn remove_subscription(&mut self, sub_id: &str) { - let Some(mut sub) = self.subs.remove(sub_id) else { - return - }; + let Some(mut sub) = self.subs.remove(sub_id) else { return }; // The `Stop` event can be generated only once. 
sub.stop(); @@ -492,8 +621,9 @@ impl> SubscriptionsInner { &mut self, sub_id: &str, hash: Block::Hash, + to_reserve: usize, ) -> Result, SubscriptionManagementError> { - let Some(sub) = self.subs.get(sub_id) else { + let Some(sub) = self.subs.get_mut(sub_id) else { return Err(SubscriptionManagementError::SubscriptionAbsent) }; @@ -501,7 +631,20 @@ impl> SubscriptionsInner { return Err(SubscriptionManagementError::BlockHashAbsent) } - BlockGuard::new(hash, sub.runtime_updates, self.backend.clone()) + let Some(permit_operations) = sub.reserve_at_most(to_reserve) else { + // Error when the server cannot execute at least one operation. + return Err(SubscriptionManagementError::ExceededLimits) + }; + + let operation_id = sub.next_operation_id(); + BlockGuard::new( + hash, + sub.with_runtime, + sub.response_sender.clone(), + operation_id, + permit_operations, + self.backend.clone(), + ) } } @@ -518,6 +661,9 @@ mod tests { Client, ClientBlockImportExt, GenesisInit, }; + /// Maximum number of ongoing operations per subscription ID. 
+ const MAX_OPERATIONS_PER_SUB: usize = 16; + fn init_backend() -> ( Arc>, Arc>>, @@ -539,7 +685,6 @@ mod tests { genesis_block_builder, None, None, - None, Box::new(TaskExecutor::new()), client_config, ) @@ -607,9 +752,14 @@ mod tests { #[test] fn sub_state_register_twice() { + let (response_sender, _response_receiver) = + tracing_unbounded("test-chain-head-method-responses", QUEUE_SIZE_WARNING); let mut sub_state = SubscriptionState:: { - runtime_updates: false, + with_runtime: false, tx_stop: None, + response_sender, + next_operation_id: 0, + limits: LimitOperations::new(MAX_OPERATIONS_PER_SUB), blocks: Default::default(), }; @@ -632,9 +782,14 @@ mod tests { #[test] fn sub_state_register_unregister() { + let (response_sender, _response_receiver) = + tracing_unbounded("test-chain-head-method-responses", QUEUE_SIZE_WARNING); let mut sub_state = SubscriptionState:: { - runtime_updates: false, + with_runtime: false, tx_stop: None, + response_sender, + next_operation_id: 0, + limits: LimitOperations::new(MAX_OPERATIONS_PER_SUB), blocks: Default::default(), }; @@ -667,13 +822,14 @@ mod tests { fn subscription_lock_block() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), backend); + let mut subs = + SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); let id = "abc".to_string(); let hash = H256::random(); // Subscription not inserted. - let err = subs.lock_block(&id, hash).unwrap_err(); + let err = subs.lock_block(&id, hash, 1).unwrap_err(); assert_eq!(err, SubscriptionManagementError::SubscriptionAbsent); let _stop = subs.insert_subscription(id.clone(), true).unwrap(); @@ -681,13 +837,13 @@ mod tests { assert!(subs.insert_subscription(id.clone(), true).is_none()); // No block hash. 
- let err = subs.lock_block(&id, hash).unwrap_err(); + let err = subs.lock_block(&id, hash, 1).unwrap_err(); assert_eq!(err, SubscriptionManagementError::BlockHashAbsent); subs.remove_subscription(&id); // No subscription. - let err = subs.lock_block(&id, hash).unwrap_err(); + let err = subs.lock_block(&id, hash, 1).unwrap_err(); assert_eq!(err, SubscriptionManagementError::SubscriptionAbsent); } @@ -699,7 +855,8 @@ mod tests { let hash = block.header.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), backend); + let mut subs = + SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); let id = "abc".to_string(); let _stop = subs.insert_subscription(id.clone(), true).unwrap(); @@ -707,9 +864,9 @@ mod tests { // First time we are pinning the block. assert_eq!(subs.pin_block(&id, hash).unwrap(), true); - let block = subs.lock_block(&id, hash).unwrap(); + let block = subs.lock_block(&id, hash, 1).unwrap(); // Subscription started with runtime updates - assert_eq!(block.has_runtime_updates(), true); + assert_eq!(block.has_runtime(), true); let invalid_id = "abc-invalid".to_string(); let err = subs.unpin_block(&invalid_id, hash).unwrap_err(); @@ -717,7 +874,7 @@ mod tests { // Unpin the block. 
subs.unpin_block(&id, hash).unwrap(); - let err = subs.lock_block(&id, hash).unwrap_err(); + let err = subs.lock_block(&id, hash, 1).unwrap_err(); assert_eq!(err, SubscriptionManagementError::BlockHashAbsent); } @@ -728,7 +885,8 @@ mod tests { let hash = block.header.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), backend); + let mut subs = + SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); let id = "abc".to_string(); let _stop = subs.insert_subscription(id.clone(), true).unwrap(); @@ -776,7 +934,8 @@ mod tests { let hash_3 = block.header.hash(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), backend); + let mut subs = + SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); let id_1 = "abc".to_string(); let id_2 = "abcd".to_string(); @@ -821,7 +980,8 @@ mod tests { futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); // Maximum number of pinned blocks is 2. - let mut subs = SubscriptionsInner::new(2, Duration::from_secs(10), backend); + let mut subs = + SubscriptionsInner::new(2, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); let id_1 = "abc".to_string(); let id_2 = "abcd".to_string(); @@ -845,10 +1005,10 @@ mod tests { assert_eq!(err, SubscriptionManagementError::ExceededLimits); // Ensure both subscriptions are removed. 
- let err = subs.lock_block(&id_1, hash_1).unwrap_err(); + let err = subs.lock_block(&id_1, hash_1, 1).unwrap_err(); assert_eq!(err, SubscriptionManagementError::SubscriptionAbsent); - let err = subs.lock_block(&id_2, hash_1).unwrap_err(); + let err = subs.lock_block(&id_2, hash_1, 1).unwrap_err(); assert_eq!(err, SubscriptionManagementError::SubscriptionAbsent); assert!(subs.global_blocks.get(&hash_1).is_none()); @@ -871,7 +1031,8 @@ mod tests { futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); // Maximum number of pinned blocks is 2 and maximum pin duration is 5 second. - let mut subs = SubscriptionsInner::new(2, Duration::from_secs(5), backend); + let mut subs = + SubscriptionsInner::new(2, Duration::from_secs(5), MAX_OPERATIONS_PER_SUB, backend); let id_1 = "abc".to_string(); let id_2 = "abcd".to_string(); @@ -895,10 +1056,10 @@ mod tests { assert_eq!(err, SubscriptionManagementError::ExceededLimits); // Ensure both subscriptions are removed. - let err = subs.lock_block(&id_1, hash_1).unwrap_err(); + let err = subs.lock_block(&id_1, hash_1, 1).unwrap_err(); assert_eq!(err, SubscriptionManagementError::SubscriptionAbsent); - let _block_guard = subs.lock_block(&id_2, hash_1).unwrap(); + let _block_guard = subs.lock_block(&id_2, hash_1, 1).unwrap(); assert_eq!(*subs.global_blocks.get(&hash_1).unwrap(), 1); assert!(subs.global_blocks.get(&hash_2).is_none()); @@ -920,21 +1081,48 @@ mod tests { fn subscription_check_stop_event() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), backend); + let mut subs = + SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); let id = "abc".to_string(); - let mut rx_stop = subs.insert_subscription(id.clone(), true).unwrap(); + let mut sub_data = subs.insert_subscription(id.clone(), true).unwrap(); // Check the stop signal was not received. 
- let res = rx_stop.try_recv().unwrap(); + let res = sub_data.rx_stop.try_recv().unwrap(); assert!(res.is_none()); let sub = subs.subs.get_mut(&id).unwrap(); sub.stop(); // Check the signal was received. - let res = rx_stop.try_recv().unwrap(); + let res = sub_data.rx_stop.try_recv().unwrap(); assert!(res.is_some()); } + + #[test] + fn ongoing_operations() { + // The object can hold at most 2 operations. + let ops = LimitOperations::new(2); + + // One operation is reserved. + let permit_one = ops.reserve_at_most(1).unwrap(); + assert_eq!(permit_one.num_reserved(), 1); + + // Request 2 operations, however there is capacity only for one. + let permit_two = ops.reserve_at_most(2).unwrap(); + // Number of reserved permits is smaller than provided. + assert_eq!(permit_two.num_reserved(), 1); + + // Try to reserve operations when there's no space. + let permit = ops.reserve_at_most(1); + assert!(permit.is_none()); + + // Release capacity. + drop(permit_two); + + // Can reserve again + let permit_three = ops.reserve_at_most(1).unwrap(); + assert_eq!(permit_three.num_reserved(), 1); + } } diff --git a/client/rpc-spec-v2/src/chain_head/subscription/mod.rs b/client/rpc-spec-v2/src/chain_head/subscription/mod.rs index 86e55acc4c176..39618ecfc1b3e 100644 --- a/client/rpc-spec-v2/src/chain_head/subscription/mod.rs +++ b/client/rpc-spec-v2/src/chain_head/subscription/mod.rs @@ -16,7 +16,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use futures::channel::oneshot; use parking_lot::RwLock; use sc_client_api::Backend; use sp_runtime::traits::Block as BlockT; @@ -25,9 +24,9 @@ use std::{sync::Arc, time::Duration}; mod error; mod inner; +use self::inner::SubscriptionsInner; pub use error::SubscriptionManagementError; -pub use inner::BlockGuard; -use inner::SubscriptionsInner; +pub use inner::{BlockGuard, InsertedSubscriptionData}; /// Manage block pinning / unpinning for subscription IDs. 
pub struct SubscriptionManagement> { @@ -41,12 +40,14 @@ impl> SubscriptionManagement { pub fn new( global_max_pinned_blocks: usize, local_max_pin_duration: Duration, + max_ongoing_operations: usize, backend: Arc, ) -> Self { SubscriptionManagement { inner: RwLock::new(SubscriptionsInner::new( global_max_pinned_blocks, local_max_pin_duration, + max_ongoing_operations, backend, )), } @@ -61,7 +62,7 @@ impl> SubscriptionManagement { &self, sub_id: String, runtime_updates: bool, - ) -> Option> { + ) -> Option> { let mut inner = self.inner.write(); inner.insert_subscription(sub_id, runtime_updates) } @@ -111,15 +112,18 @@ impl> SubscriptionManagement { /// Ensure the block remains pinned until the return object is dropped. /// - /// Returns a [`BlockGuard`] that pins and unpins the block hash in RAII manner. - /// Returns an error if the block hash is not pinned for the subscription or - /// the subscription ID is invalid. + /// Returns a [`BlockGuard`] that pins and unpins the block hash in RAII manner + /// and reserves capacity for ogoing operations. + /// + /// Returns an error if the block hash is not pinned for the subscription, + /// the subscription ID is invalid or the limit of ongoing operations was exceeded. 
pub fn lock_block( &self, sub_id: &str, hash: Block::Hash, + to_reserve: usize, ) -> Result, SubscriptionManagementError> { let mut inner = self.inner.write(); - inner.lock_block(sub_id, hash) + inner.lock_block(sub_id, hash, to_reserve) } } diff --git a/client/rpc-spec-v2/src/chain_head/test_utils.rs b/client/rpc-spec-v2/src/chain_head/test_utils.rs index ee563debb4502..6e92e87608b44 100644 --- a/client/rpc-spec-v2/src/chain_head/test_utils.rs +++ b/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -203,10 +203,7 @@ impl< impl> CallApiAt for ChainHeadMockClient { type StateBackend = >::StateBackend; - fn call_api_at( - &self, - params: CallApiAtParams>::StateBackend>, - ) -> Result, sp_api::ApiError> { + fn call_api_at(&self, params: CallApiAtParams) -> Result, sp_api::ApiError> { self.client.call_api_at(params) } @@ -217,6 +214,14 @@ impl> CallApiAt for ChainHeadMock fn state_at(&self, at: Block::Hash) -> Result { self.client.state_at(at) } + + fn initialize_extensions( + &self, + at: ::Hash, + extensions: &mut sp_api::Extensions, + ) -> Result<(), sp_api::ApiError> { + self.client.initialize_extensions(at, extensions) + } } impl> BlockBackend diff --git a/client/rpc-spec-v2/src/chain_head/tests.rs b/client/rpc-spec-v2/src/chain_head/tests.rs index d3d4fc649e3fe..4bda06d3cf01c 100644 --- a/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/client/rpc-spec-v2/src/chain_head/tests.rs @@ -1,4 +1,7 @@ -use crate::chain_head::test_utils::ChainHeadMockClient; +use crate::chain_head::{ + event::{MethodResponse, StorageQuery, StorageQueryType, StorageResultType}, + test_utils::ChainHeadMockClient, +}; use super::*; use assert_matches::assert_matches; @@ -6,6 +9,7 @@ use codec::{Decode, Encode}; use futures::Future; use jsonrpsee::{ core::{error::Error, server::rpc_module::Subscription as RpcSubscription}, + rpc_params, types::{error::CallError, EmptyServerParams as EmptyParams}, RpcModule, }; @@ -16,12 +20,12 @@ use sp_api::BlockT; use sp_blockchain::HeaderBackend; use 
sp_consensus::BlockOrigin; use sp_core::{ - hexdisplay::HexDisplay, storage::well_known_keys::{self, CODE}, testing::TaskExecutor, + Blake2Hasher, Hasher, }; use sp_version::RuntimeVersion; -use std::{sync::Arc, time::Duration}; +use std::{collections::HashSet, sync::Arc, time::Duration}; use substrate_test_runtime::Transfer; use substrate_test_runtime_client::{ prelude::*, runtime, runtime::RuntimeApi, Backend, BlockBuilderExt, Client, @@ -32,6 +36,7 @@ type Header = substrate_test_runtime_client::runtime::Header; type Block = substrate_test_runtime_client::runtime::Block; const MAX_PINNED_BLOCKS: usize = 32; const MAX_PINNED_SECS: u64 = 60; +const MAX_OPERATIONS: usize = 16; const CHAIN_GENESIS: [u8; 32] = [0; 32]; const INVALID_HASH: [u8; 32] = [1; 32]; const KEY: &[u8] = b":mock"; @@ -75,8 +80,11 @@ async fn setup_api() -> ( backend, Arc::new(TaskExecutor::default()), CHAIN_GENESIS, - MAX_PINNED_BLOCKS, - Duration::from_secs(MAX_PINNED_SECS), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + }, ) .into_rpc(); @@ -115,8 +123,11 @@ async fn follow_subscription_produces_blocks() { backend, Arc::new(TaskExecutor::default()), CHAIN_GENESIS, - MAX_PINNED_BLOCKS, - Duration::from_secs(MAX_PINNED_SECS), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + }, ) .into_rpc(); @@ -128,7 +139,7 @@ async fn follow_subscription_produces_blocks() { let expected = FollowEvent::Initialized(Initialized { finalized_block_hash: format!("{:?}", finalized_hash), finalized_block_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -142,7 +153,7 @@ async fn follow_subscription_produces_blocks() { block_hash: format!("{:?}", best_hash), 
parent_block_hash: format!("{:?}", finalized_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -173,8 +184,11 @@ async fn follow_with_runtime() { backend, Arc::new(TaskExecutor::default()), CHAIN_GENESIS, - MAX_PINNED_BLOCKS, - Duration::from_secs(MAX_PINNED_SECS), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + }, ) .into_rpc(); @@ -190,7 +204,8 @@ async fn follow_with_runtime() { [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ [\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\ [\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\ - [\"0xed99c5acb25eedf5\",3]],\"transactionVersion\":1,\"stateVersion\":1}"; + [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"stateVersion\":1}"; + let runtime: RuntimeVersion = serde_json::from_str(runtime_str).unwrap(); let finalized_block_runtime = @@ -199,9 +214,9 @@ async fn follow_with_runtime() { let expected = FollowEvent::Initialized(Initialized { finalized_block_hash: format!("{:?}", finalized_hash), finalized_block_runtime, - runtime_updates: false, + with_runtime: false, }); - assert_eq!(event, expected); + pretty_assertions::assert_eq!(event, expected); // Import a new block without runtime changes. // The runtime field must be None in this case. 
@@ -214,7 +229,7 @@ async fn follow_with_runtime() { block_hash: format!("{:?}", best_hash), parent_block_hash: format!("{:?}", finalized_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -264,7 +279,7 @@ async fn follow_with_runtime() { block_hash: format!("{:?}", best_hash), parent_block_hash: format!("{:?}", finalized_hash), new_runtime, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); } @@ -280,21 +295,24 @@ async fn get_genesis() { backend, Arc::new(TaskExecutor::default()), CHAIN_GENESIS, - MAX_PINNED_BLOCKS, - Duration::from_secs(MAX_PINNED_SECS), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + }, ) .into_rpc(); let genesis: String = api.call("chainHead_unstable_genesisHash", EmptyParams::new()).await.unwrap(); - assert_eq!(genesis, format!("0x{}", HexDisplay::from(&CHAIN_GENESIS))); + assert_eq!(genesis, hex_string(&CHAIN_GENESIS)); } #[tokio::test] async fn get_header() { let (_client, api, _sub, sub_id, block) = setup_api().await; let block_hash = format!("{:?}", block.header.hash()); - let invalid_hash = format!("0x{:?}", HexDisplay::from(&INVALID_HASH)); + let invalid_hash = hex_string(&INVALID_HASH); // Invalid subscription ID must produce no results. let res: Option = api @@ -323,31 +341,36 @@ async fn get_header() { async fn get_body() { let (mut client, api, mut block_sub, sub_id, block) = setup_api().await; let block_hash = format!("{:?}", block.header.hash()); - let invalid_hash = format!("0x{:?}", HexDisplay::from(&INVALID_HASH)); + let invalid_hash = hex_string(&INVALID_HASH); - // Subscription ID is stale the disjoint event is emitted. - let mut sub = api - .subscribe("chainHead_unstable_body", ["invalid_sub_id", &invalid_hash]) + // Subscription ID is invalid. 
+ let response: MethodResponse = api + .call("chainHead_unstable_body", ["invalid_sub_id", &invalid_hash]) .await .unwrap(); - let event: ChainHeadEvent = get_next_event(&mut sub).await; - assert_eq!(event, ChainHeadEvent::::Disjoint); + assert_matches!(response, MethodResponse::LimitReached); - // Valid subscription ID with invalid block hash will error. + // Block hash is invalid. let err = api - .subscribe("chainHead_unstable_body", [&sub_id, &invalid_hash]) + .call::<_, serde_json::Value>("chainHead_unstable_body", [&sub_id, &invalid_hash]) .await .unwrap_err(); assert_matches!(err, Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" ); - // Obtain valid the body (list of extrinsics). - let mut sub = api.subscribe("chainHead_unstable_body", [&sub_id, &block_hash]).await.unwrap(); - let event: ChainHeadEvent = get_next_event(&mut sub).await; - // Block contains no extrinsics. - assert_matches!(event, - ChainHeadEvent::Done(done) if done.result == "0x00" + // Valid call. + let response: MethodResponse = + api.call("chainHead_unstable_body", [&sub_id, &block_hash]).await.unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + + // Response propagated to `chainHead_follow`. + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationBodyDone(done) if done.operation_id == operation_id && done.value.is_empty() ); // Import a block with extrinsics. @@ -373,35 +396,41 @@ async fn get_body() { FollowEvent::BestBlockChanged(_) ); - let mut sub = api.subscribe("chainHead_unstable_body", [&sub_id, &block_hash]).await.unwrap(); - let event: ChainHeadEvent = get_next_event(&mut sub).await; - // Hex encoded scale encoded string for the vector of extrinsics. 
- let expected = format!("0x{:?}", HexDisplay::from(&block.extrinsics.encode())); - assert_matches!(event, - ChainHeadEvent::Done(done) if done.result == expected + // Valid call to a block with extrinsics. + let response: MethodResponse = + api.call("chainHead_unstable_body", [&sub_id, &block_hash]).await.unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + + // Response propagated to `chainHead_follow`. + let expected_tx = hex_string(&block.extrinsics[0].encode()); + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationBodyDone(done) if done.operation_id == operation_id && done.value == vec![expected_tx] ); } #[tokio::test] async fn call_runtime() { - let (_client, api, _sub, sub_id, block) = setup_api().await; + let (_client, api, mut block_sub, sub_id, block) = setup_api().await; let block_hash = format!("{:?}", block.header.hash()); - let invalid_hash = format!("0x{:?}", HexDisplay::from(&INVALID_HASH)); + let invalid_hash = hex_string(&INVALID_HASH); - // Subscription ID is stale the disjoint event is emitted. - let mut sub = api - .subscribe( + // Subscription ID is invalid. + let response: MethodResponse = api + .call( "chainHead_unstable_call", ["invalid_sub_id", &block_hash, "BabeApi_current_epoch", "0x00"], ) .await .unwrap(); - let event: ChainHeadEvent = get_next_event(&mut sub).await; - assert_eq!(event, ChainHeadEvent::::Disjoint); + assert_matches!(response, MethodResponse::LimitReached); - // Valid subscription ID with invalid block hash will error. + // Block hash is invalid. let err = api - .subscribe( + .call::<_, serde_json::Value>( "chainHead_unstable_call", [&sub_id, &invalid_hash, "BabeApi_current_epoch", "0x00"], ) @@ -413,8 +442,9 @@ async fn call_runtime() { // Pass an invalid parameters that cannot be decode. 
let err = api - .subscribe( + .call::<_, serde_json::Value>( "chainHead_unstable_call", + // 0x0 is invalid. [&sub_id, &block_hash, "BabeApi_current_epoch", "0x0"], ) .await @@ -423,34 +453,43 @@ async fn call_runtime() { Error::Call(CallError::Custom(ref err)) if err.code() == 2003 && err.message().contains("Invalid parameter") ); + // Valid call. let alice_id = AccountKeyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. - let call_parameters = format!("0x{:?}", HexDisplay::from(&alice_id.encode())); - let mut sub = api - .subscribe( + let call_parameters = hex_string(&alice_id.encode()); + let response: MethodResponse = api + .call( "chainHead_unstable_call", [&sub_id, &block_hash, "AccountNonceApi_account_nonce", &call_parameters], ) .await .unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + // Response propagated to `chainHead_follow`. assert_matches!( - get_next_event::>(&mut sub).await, - ChainHeadEvent::Done(done) if done.result == "0x0000000000000000" + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationCallDone(done) if done.operation_id == operation_id && done.output == "0x0000000000000000" ); // The `current_epoch` takes no parameters and not draining the input buffer // will cause the execution to fail. - let mut sub = api - .subscribe( - "chainHead_unstable_call", - [&sub_id, &block_hash, "BabeApi_current_epoch", "0x00"], - ) + let response: MethodResponse = api + .call("chainHead_unstable_call", [&sub_id, &block_hash, "BabeApi_current_epoch", "0x00"]) .await .unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + + // Error propagated to `chainHead_follow`. 
assert_matches!( - get_next_event::>(&mut sub).await, - ChainHeadEvent::Error(event) if event.error.contains("Execution failed") + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationError(error) if error.operation_id == operation_id && error.error.contains("Execution failed") ); } @@ -465,8 +504,11 @@ async fn call_runtime_without_flag() { backend, Arc::new(TaskExecutor::default()), CHAIN_GENESIS, - MAX_PINNED_BLOCKS, - Duration::from_secs(MAX_PINNED_SECS), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + }, ) .into_rpc(); @@ -492,11 +534,11 @@ async fn call_runtime_without_flag() { FollowEvent::BestBlockChanged(_) ); - // Valid runtime call on a subscription started with `runtime_updates` false. + // Valid runtime call on a subscription started with `with_runtime` false. let alice_id = AccountKeyring::Alice.to_account_id(); - let call_parameters = format!("0x{:?}", HexDisplay::from(&alice_id.encode())); + let call_parameters = hex_string(&alice_id.encode()); let err = api - .subscribe( + .call::<_, serde_json::Value>( "chainHead_unstable_call", [&sub_id, &block_hash, "AccountNonceApi_account_nonce", &call_parameters], ) @@ -509,23 +551,286 @@ async fn call_runtime_without_flag() { } #[tokio::test] -async fn get_storage() { +async fn get_storage_hash() { let (mut client, api, mut block_sub, sub_id, block) = setup_api().await; let block_hash = format!("{:?}", block.header.hash()); - let invalid_hash = format!("0x{:?}", HexDisplay::from(&INVALID_HASH)); - let key = format!("0x{:?}", HexDisplay::from(&KEY)); + let invalid_hash = hex_string(&INVALID_HASH); + let key = hex_string(&KEY); + + // Subscription ID is invalid. 
+ let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + "invalid_sub_id", + &invalid_hash, + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }] + ], + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::LimitReached); + + // Block hash is invalid. + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &invalid_hash, + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }] + ], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + ); + + // Valid call without storage at the key. + let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &block_hash, + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }] + ], + ) + .await + .unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + // The `Done` event is generated directly since the key does not have any value associated. + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); + + // Import a new block with storage changes. + let mut builder = client.new_block(Default::default()).unwrap(); + builder.push_storage_change(KEY.to_vec(), Some(VALUE.to_vec())).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. 
+ assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::BestBlockChanged(_) + ); + + // Valid call with storage at the key. + let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &block_hash, + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }] + ], + ) + .await + .unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + + let expected_hash = format!("{:?}", Blake2Hasher::hash(&VALUE)); + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && + res.items.len() == 1 && + res.items[0].key == key && res.items[0].result == StorageResultType::Hash(expected_hash) + ); + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); + + // Child value set in `setup_api`. + let child_info = hex_string(&CHILD_STORAGE_KEY); + let genesis_hash = format!("{:?}", client.genesis_hash()); + + // Valid call with storage at the key. 
+ let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &genesis_hash, + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }], + &child_info + ], + ) + .await + .unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + + let expected_hash = format!("{:?}", Blake2Hasher::hash(&CHILD_VALUE)); + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && + res.items.len() == 1 && + res.items[0].key == key && res.items[0].result == StorageResultType::Hash(expected_hash) + ); + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); +} + +#[tokio::test] +async fn get_storage_multi_query_iter() { + let (mut client, api, mut block_sub, sub_id, _) = setup_api().await; + let key = hex_string(&KEY); + + // Import a new block with storage changes. + let mut builder = client.new_block(Default::default()).unwrap(); + builder.push_storage_change(KEY.to_vec(), Some(VALUE.to_vec())).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::BestBlockChanged(_) + ); + + // Valid call with storage at the key. 
+ let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &block_hash, + vec![ + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsHashes + }, + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsValues + } + ] + ], + ) + .await + .unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + + let expected_hash = format!("{:?}", Blake2Hasher::hash(&VALUE)); + let expected_value = hex_string(&VALUE); + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && + res.items.len() == 2 && + res.items[0].key == key && + res.items[1].key == key && + res.items[0].result == StorageResultType::Hash(expected_hash) && + res.items[1].result == StorageResultType::Value(expected_value) + ); + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); - // Subscription ID is stale the disjoint event is emitted. - let mut sub = api - .subscribe("chainHead_unstable_storage", ["invalid_sub_id", &invalid_hash, &key]) + // Child value set in `setup_api`. 
+ let child_info = hex_string(&CHILD_STORAGE_KEY); + let genesis_hash = format!("{:?}", client.genesis_hash()); + let expected_hash = format!("{:?}", Blake2Hasher::hash(&CHILD_VALUE)); + let expected_value = hex_string(&CHILD_VALUE); + let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &genesis_hash, + vec![ + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsHashes + }, + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsValues + } + ], + &child_info + ], + ) .await .unwrap(); - let event: ChainHeadEvent = get_next_event(&mut sub).await; - assert_eq!(event, ChainHeadEvent::::Disjoint); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; - // Valid subscription ID with invalid block hash will error. + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && + res.items.len() == 2 && + res.items[0].key == key && + res.items[1].key == key && + res.items[0].result == StorageResultType::Hash(expected_hash) && + res.items[1].result == StorageResultType::Value(expected_value) + ); + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); +} + +#[tokio::test] +async fn get_storage_value() { + let (mut client, api, mut block_sub, sub_id, block) = setup_api().await; + let block_hash = format!("{:?}", block.header.hash()); + let invalid_hash = hex_string(&INVALID_HASH); + let key = hex_string(&KEY); + + // Subscription ID is invalid. 
+ let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + "invalid_sub_id", + &invalid_hash, + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }] + ], + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::LimitReached); + + // Block hash is invalid. let err = api - .subscribe("chainHead_unstable_storage", [&sub_id, &invalid_hash, &key]) + .call::<_, serde_json::Value>( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &invalid_hash, + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }] + ], + ) .await .unwrap_err(); assert_matches!(err, @@ -533,12 +838,26 @@ async fn get_storage() { ); // Valid call without storage at the key. - let mut sub = api - .subscribe("chainHead_unstable_storage", [&sub_id, &block_hash, &key]) + let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &block_hash, + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }] + ], + ) .await .unwrap(); - let event: ChainHeadEvent> = get_next_event(&mut sub).await; - assert_matches!(event, ChainHeadEvent::>::Done(done) if done.result.is_none()); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + // The `Done` event is generated directly since the key does not have any value associated. + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); // Import a new block with storage changes. let mut builder = client.new_block(Default::default()).unwrap(); @@ -558,75 +877,329 @@ async fn get_storage() { ); // Valid call with storage at the key. 
- let expected_value = Some(format!("0x{:?}", HexDisplay::from(&VALUE))); - let mut sub = api - .subscribe("chainHead_unstable_storage", [&sub_id, &block_hash, &key]) + let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &block_hash, + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }] + ], + ) .await .unwrap(); - let event: ChainHeadEvent> = get_next_event(&mut sub).await; - assert_matches!(event, ChainHeadEvent::>::Done(done) if done.result == expected_value); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + + let expected_value = hex_string(&VALUE); + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && + res.items.len() == 1 && + res.items[0].key == key && res.items[0].result == StorageResultType::Value(expected_value) + ); + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); // Child value set in `setup_api`. 
- let child_info = format!("0x{:?}", HexDisplay::from(b"child")); + let child_info = hex_string(&CHILD_STORAGE_KEY); let genesis_hash = format!("{:?}", client.genesis_hash()); - let expected_value = Some(format!("0x{:?}", HexDisplay::from(&CHILD_VALUE))); - let mut sub = api - .subscribe("chainHead_unstable_storage", [&sub_id, &genesis_hash, &key, &child_info]) + + let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &genesis_hash, + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }], + &child_info + ], + ) .await .unwrap(); - let event: ChainHeadEvent> = get_next_event(&mut sub).await; - assert_matches!(event, ChainHeadEvent::>::Done(done) if done.result == expected_value); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + + let expected_value = hex_string(&CHILD_VALUE); + + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && + res.items.len() == 1 && + res.items[0].key == key && res.items[0].result == StorageResultType::Value(expected_value) + ); + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); } #[tokio::test] -async fn get_storage_wrong_key() { - let (mut _client, api, mut _block_sub, sub_id, block) = setup_api().await; +async fn get_storage_non_queryable_key() { + let (mut _client, api, mut block_sub, sub_id, block) = setup_api().await; let block_hash = format!("{:?}", block.header.hash()); - let key = format!("0x{:?}", HexDisplay::from(&KEY)); + let key = hex_string(&KEY); // Key is prefixed by CHILD_STORAGE_KEY_PREFIX. 
let mut prefixed_key = well_known_keys::CHILD_STORAGE_KEY_PREFIX.to_vec(); prefixed_key.extend_from_slice(&KEY); - let prefixed_key = format!("0x{:?}", HexDisplay::from(&prefixed_key)); - let mut sub = api - .subscribe("chainHead_unstable_storage", [&sub_id, &block_hash, &prefixed_key]) + let prefixed_key = hex_string(&prefixed_key); + + let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &block_hash, + vec![StorageQuery { key: prefixed_key, query_type: StorageQueryType::Value }] + ], + ) .await .unwrap(); - let event: ChainHeadEvent> = get_next_event(&mut sub).await; - assert_matches!(event, ChainHeadEvent::>::Done(done) if done.result.is_none()); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + // The `Done` event is generated directly since the key is not queryable. + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); // Key is prefixed by DEFAULT_CHILD_STORAGE_KEY_PREFIX. 
let mut prefixed_key = well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.to_vec(); prefixed_key.extend_from_slice(&KEY); - let prefixed_key = format!("0x{:?}", HexDisplay::from(&prefixed_key)); - let mut sub = api - .subscribe("chainHead_unstable_storage", [&sub_id, &block_hash, &prefixed_key]) + let prefixed_key = hex_string(&prefixed_key); + let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &block_hash, + vec![StorageQuery { key: prefixed_key, query_type: StorageQueryType::Value }] + ], + ) .await .unwrap(); - let event: ChainHeadEvent> = get_next_event(&mut sub).await; - assert_matches!(event, ChainHeadEvent::>::Done(done) if done.result.is_none()); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + // The `Done` event is generated directly since the key is not queryable. + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); // Child key is prefixed by CHILD_STORAGE_KEY_PREFIX. 
let mut prefixed_key = well_known_keys::CHILD_STORAGE_KEY_PREFIX.to_vec(); - prefixed_key.extend_from_slice(b"child"); - let prefixed_key = format!("0x{:?}", HexDisplay::from(&prefixed_key)); - let mut sub = api - .subscribe("chainHead_unstable_storage", [&sub_id, &block_hash, &key, &prefixed_key]) + prefixed_key.extend_from_slice(CHILD_STORAGE_KEY); + let prefixed_key = hex_string(&prefixed_key); + let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &block_hash, + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }], + &prefixed_key + ], + ) .await .unwrap(); - let event: ChainHeadEvent> = get_next_event(&mut sub).await; - assert_matches!(event, ChainHeadEvent::>::Done(done) if done.result.is_none()); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + // The `Done` event is generated directly since the key is not queryable. + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); // Child key is prefixed by DEFAULT_CHILD_STORAGE_KEY_PREFIX. 
let mut prefixed_key = well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.to_vec(); - prefixed_key.extend_from_slice(b"child"); - let prefixed_key = format!("0x{:?}", HexDisplay::from(&prefixed_key)); - let mut sub = api - .subscribe("chainHead_unstable_storage", [&sub_id, &block_hash, &key, &prefixed_key]) + prefixed_key.extend_from_slice(CHILD_STORAGE_KEY); + let prefixed_key = hex_string(&prefixed_key); + let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &block_hash, + vec![StorageQuery { key, query_type: StorageQueryType::Value }], + &prefixed_key + ], + ) + .await + .unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + // The `Done` event is generated directly since the key is not queryable. + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); +} + +#[tokio::test] +async fn unique_operation_ids() { + let (mut _client, api, mut block_sub, sub_id, block) = setup_api().await; + let block_hash = format!("{:?}", block.header.hash()); + + let mut op_ids = HashSet::new(); + + // Ensure that operation IDs are unique for multiple method calls. + for _ in 0..5 { + // Valid `chainHead_unstable_body` call. + let response: MethodResponse = + api.call("chainHead_unstable_body", [&sub_id, &block_hash]).await.unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationBodyDone(done) if done.operation_id == operation_id && done.value.is_empty() + ); + // Ensure uniqueness. + assert!(op_ids.insert(operation_id)); + + // Valid `chainHead_unstable_storage` call. 
+ let key = hex_string(&KEY); + let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &block_hash, + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }] + ], + ) + .await + .unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + // The `Done` event is generated directly since the key does not have any value associated. + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); + // Ensure uniqueness. + assert!(op_ids.insert(operation_id)); + + // Valid `chainHead_unstable_call` call. + let alice_id = AccountKeyring::Alice.to_account_id(); + let call_parameters = hex_string(&alice_id.encode()); + let response: MethodResponse = api + .call( + "chainHead_unstable_call", + [&sub_id, &block_hash, "AccountNonceApi_account_nonce", &call_parameters], + ) + .await + .unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + // Response propagated to `chainHead_follow`. + assert_matches!( + get_next_event::>(&mut block_sub).await, + FollowEvent::OperationCallDone(done) if done.operation_id == operation_id && done.output == "0x0000000000000000" + ); + // Ensure uniqueness. 
+ assert!(op_ids.insert(operation_id)); + } +} + +#[tokio::test] +async fn separate_operation_ids_for_subscriptions() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TaskExecutor::default()), + CHAIN_GENESIS, + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + }, + ) + .into_rpc(); + + // Create two separate subscriptions. + let mut sub_first = api.subscribe("chainHead_unstable_follow", [true]).await.unwrap(); + let sub_id_first = sub_first.subscription_id(); + let sub_id_first = serde_json::to_string(&sub_id_first).unwrap(); + + let mut sub_second = api.subscribe("chainHead_unstable_follow", [true]).await.unwrap(); + let sub_id_second = sub_second.subscription_id(); + let sub_id_second = serde_json::to_string(&sub_id_second).unwrap(); + + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + let block_hash = format!("{:?}", block.header.hash()); + + // Ensure the imported block is propagated and pinned. + assert_matches!( + get_next_event::>(&mut sub_first).await, + FollowEvent::Initialized(_) + ); + assert_matches!( + get_next_event::>(&mut sub_first).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub_first).await, + FollowEvent::BestBlockChanged(_) + ); + + assert_matches!( + get_next_event::>(&mut sub_second).await, + FollowEvent::Initialized(_) + ); + assert_matches!( + get_next_event::>(&mut sub_second).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub_second).await, + FollowEvent::BestBlockChanged(_) + ); + + // Each `chainHead_follow` subscription receives a separate operation ID. 
+ let response: MethodResponse = + api.call("chainHead_unstable_body", [&sub_id_first, &block_hash]).await.unwrap(); + let operation_id: String = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + assert_eq!(operation_id, "0"); + + let response: MethodResponse = api + .call("chainHead_unstable_body", [&sub_id_second, &block_hash]) .await .unwrap(); - let event: ChainHeadEvent> = get_next_event(&mut sub).await; - assert_matches!(event, ChainHeadEvent::>::Done(done) if done.result.is_none()); + let operation_id_second: String = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + // The second subscription does not increment the operation ID of the first one. + assert_eq!(operation_id_second, "0"); } #[tokio::test] @@ -640,8 +1213,11 @@ async fn follow_generates_initial_blocks() { backend, Arc::new(TaskExecutor::default()), CHAIN_GENESIS, - MAX_PINNED_BLOCKS, - Duration::from_secs(MAX_PINNED_SECS), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + }, ) .into_rpc(); @@ -681,7 +1257,7 @@ async fn follow_generates_initial_blocks() { let expected = FollowEvent::Initialized(Initialized { finalized_block_hash: format!("{:?}", finalized_hash), finalized_block_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -691,7 +1267,7 @@ async fn follow_generates_initial_blocks() { block_hash: format!("{:?}", block_1_hash), parent_block_hash: format!("{:?}", finalized_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -701,7 +1277,7 @@ async fn follow_generates_initial_blocks() { block_hash: format!("{:?}", block_2_hash), 
parent_block_hash: format!("{:?}", block_1_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); // Check block 3. @@ -710,7 +1286,7 @@ async fn follow_generates_initial_blocks() { block_hash: format!("{:?}", block_3_hash), parent_block_hash: format!("{:?}", block_1_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -730,7 +1306,7 @@ async fn follow_generates_initial_blocks() { block_hash: format!("{:?}", block_4_hash), parent_block_hash: format!("{:?}", block_2_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -768,8 +1344,11 @@ async fn follow_exceeding_pinned_blocks() { backend, Arc::new(TaskExecutor::default()), CHAIN_GENESIS, - 2, - Duration::from_secs(MAX_PINNED_SECS), + ChainHeadConfig { + global_max_pinned_blocks: 2, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + }, ) .into_rpc(); @@ -819,8 +1398,11 @@ async fn follow_with_unpin() { backend, Arc::new(TaskExecutor::default()), CHAIN_GENESIS, - 2, - Duration::from_secs(MAX_PINNED_SECS), + ChainHeadConfig { + global_max_pinned_blocks: 2, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + }, ) .into_rpc(); @@ -847,14 +1429,14 @@ async fn follow_with_unpin() { ); // Unpin an invalid subscription ID must return Ok(()). - let invalid_hash = format!("0x{:?}", HexDisplay::from(&INVALID_HASH)); + let invalid_hash = hex_string(&INVALID_HASH); let _res: () = api .call("chainHead_unstable_unpin", ["invalid_sub_id", &invalid_hash]) .await .unwrap(); // Valid subscription with invalid block hash. 
- let invalid_hash = format!("0x{:?}", HexDisplay::from(&INVALID_HASH)); + let invalid_hash = hex_string(&INVALID_HASH); let err = api .call::<_, serde_json::Value>("chainHead_unstable_unpin", [&sub_id, &invalid_hash]) .await @@ -900,8 +1482,11 @@ async fn follow_prune_best_block() { backend, Arc::new(TaskExecutor::default()), CHAIN_GENESIS, - MAX_PINNED_BLOCKS, - Duration::from_secs(MAX_PINNED_SECS), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + }, ) .into_rpc(); @@ -913,7 +1498,7 @@ async fn follow_prune_best_block() { let expected = FollowEvent::Initialized(Initialized { finalized_block_hash: format!("{:?}", finalized_hash), finalized_block_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -963,7 +1548,7 @@ async fn follow_prune_best_block() { block_hash: format!("{:?}", block_1_hash), parent_block_hash: format!("{:?}", finalized_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); let event: FollowEvent = get_next_event(&mut sub).await; @@ -978,7 +1563,7 @@ async fn follow_prune_best_block() { block_hash: format!("{:?}", block_3_hash), parent_block_hash: format!("{:?}", block_1_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); let event: FollowEvent = get_next_event(&mut sub).await; @@ -993,7 +1578,7 @@ async fn follow_prune_best_block() { block_hash: format!("{:?}", block_4_hash), parent_block_hash: format!("{:?}", block_3_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); let event: FollowEvent = get_next_event(&mut sub).await; @@ -1008,7 +1593,7 @@ async fn follow_prune_best_block() { block_hash: format!("{:?}", block_2_hash), parent_block_hash: format!("{:?}", block_1_hash), new_runtime: 
None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); let event: FollowEvent = get_next_event(&mut sub).await; @@ -1057,8 +1642,11 @@ async fn follow_forks_pruned_block() { backend, Arc::new(TaskExecutor::default()), CHAIN_GENESIS, - MAX_PINNED_BLOCKS, - Duration::from_secs(MAX_PINNED_SECS), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + }, ) .into_rpc(); @@ -1118,7 +1706,7 @@ async fn follow_forks_pruned_block() { let expected = FollowEvent::Initialized(Initialized { finalized_block_hash: format!("{:?}", block_3_hash), finalized_block_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -1142,7 +1730,7 @@ async fn follow_forks_pruned_block() { block_hash: format!("{:?}", block_6_hash), parent_block_hash: format!("{:?}", block_3_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); let event: FollowEvent = get_next_event(&mut sub).await; @@ -1171,8 +1759,11 @@ async fn follow_report_multiple_pruned_block() { backend, Arc::new(TaskExecutor::default()), CHAIN_GENESIS, - MAX_PINNED_BLOCKS, - Duration::from_secs(MAX_PINNED_SECS), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + }, ) .into_rpc(); @@ -1233,7 +1824,7 @@ async fn follow_report_multiple_pruned_block() { let expected = FollowEvent::Initialized(Initialized { finalized_block_hash: format!("{:?}", finalized_hash), finalized_block_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -1242,7 +1833,7 @@ async fn follow_report_multiple_pruned_block() { block_hash: format!("{:?}", block_1_hash), parent_block_hash: format!("{:?}", 
finalized_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -1251,7 +1842,7 @@ async fn follow_report_multiple_pruned_block() { block_hash: format!("{:?}", block_2_hash), parent_block_hash: format!("{:?}", block_1_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -1260,7 +1851,7 @@ async fn follow_report_multiple_pruned_block() { block_hash: format!("{:?}", block_3_hash), parent_block_hash: format!("{:?}", block_2_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -1270,7 +1861,7 @@ async fn follow_report_multiple_pruned_block() { block_hash: format!("{:?}", block_4_hash), parent_block_hash: format!("{:?}", block_1_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -1279,7 +1870,7 @@ async fn follow_report_multiple_pruned_block() { block_hash: format!("{:?}", block_5_hash), parent_block_hash: format!("{:?}", block_4_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -1325,7 +1916,7 @@ async fn follow_report_multiple_pruned_block() { block_hash: format!("{:?}", block_6_hash), parent_block_hash: format!("{:?}", block_3_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); let event: FollowEvent = get_next_event(&mut sub).await; @@ -1365,7 +1956,6 @@ async fn pin_block_references() { genesis_block_builder, None, None, - None, Box::new(TaskExecutor::new()), client_config, ) @@ -1377,8 +1967,11 @@ async fn pin_block_references() { backend.clone(), Arc::new(TaskExecutor::default()), CHAIN_GENESIS, - 3, - Duration::from_secs(MAX_PINNED_SECS), + ChainHeadConfig { + global_max_pinned_blocks: 3, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + 
}, ) .into_rpc(); @@ -1487,8 +2080,11 @@ async fn follow_finalized_before_new_block() { backend, Arc::new(TaskExecutor::default()), CHAIN_GENESIS, - MAX_PINNED_BLOCKS, - Duration::from_secs(MAX_PINNED_SECS), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + }, ) .into_rpc(); @@ -1511,7 +2107,7 @@ async fn follow_finalized_before_new_block() { let expected = FollowEvent::Initialized(Initialized { finalized_block_hash: format!("{:?}", finalized_hash), finalized_block_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -1521,7 +2117,7 @@ async fn follow_finalized_before_new_block() { block_hash: format!("{:?}", block_1_hash), parent_block_hash: format!("{:?}", finalized_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -1556,7 +2152,7 @@ async fn follow_finalized_before_new_block() { block_hash: format!("{:?}", block_2_hash), parent_block_hash: format!("{:?}", block_1_hash), new_runtime: None, - runtime_updates: false, + with_runtime: false, }); assert_eq!(event, expected); @@ -1566,3 +2162,100 @@ async fn follow_finalized_before_new_block() { }); assert_eq!(event, expected); } + +#[tokio::test] +async fn ensure_operation_limits_works() { + let child_info = ChildInfo::new_default(CHILD_STORAGE_KEY); + let builder = TestClientBuilder::new().add_extra_child_storage( + &child_info, + KEY.to_vec(), + CHILD_VALUE.to_vec(), + ); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + // Configure the chainHead with maximum 1 ongoing operations. 
+ let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TaskExecutor::default()), + CHAIN_GENESIS, + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: 1, + }, + ) + .into_rpc(); + + let mut sub = api.subscribe("chainHead_unstable_follow", [true]).await.unwrap(); + let sub_id = sub.subscription_id(); + let sub_id = serde_json::to_string(&sub_id).unwrap(); + + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::Initialized(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + + let block_hash = format!("{:?}", block.header.hash()); + let key = hex_string(&KEY); + + let items = vec![ + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + ]; + + let response: MethodResponse = api + .call("chainHead_unstable_storage", rpc_params![&sub_id, &block_hash, items]) + .await + .unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => { + // Check discarded items. + assert_eq!(started.discarded_items.unwrap(), 3); + started.operation_id + }, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + // No value associated with the provided key. 
+ assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); + + // The storage is finished and capacity must be released. + let alice_id = AccountKeyring::Alice.to_account_id(); + // Hex encoded scale encoded bytes representing the call parameters. + let call_parameters = hex_string(&alice_id.encode()); + let response: MethodResponse = api + .call( + "chainHead_unstable_call", + [&sub_id, &block_hash, "AccountNonceApi_account_nonce", &call_parameters], + ) + .await + .unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + + // Response propagated to `chainHead_follow`. + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::OperationCallDone(done) if done.operation_id == operation_id && done.output == "0x0000000000000000" + ); +} diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 5a6e3e1083923..a3574ed84d011 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" jsonrpsee = { version = "0.16.2", features = ["server"] } log = "0.4.17" @@ -28,13 +28,13 @@ sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/a sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.13.0", path = "../../primitives/keystore" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-keystore = { version = "0.27.0", path = 
"../../primitives/keystore" } sp-offchain = { version = "4.0.0-dev", path = "../../primitives/offchain" } sp-rpc = { version = "6.0.0", path = "../../primitives/rpc" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } sp-session = { version = "4.0.0-dev", path = "../../primitives/session" } -sp-version = { version = "5.0.0", path = "../../primitives/version" } +sp-version = { version = "22.0.0", path = "../../primitives/version" } sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" } tokio = "1.22.0" @@ -48,8 +48,9 @@ sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } tokio = "1.22.0" -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +pretty_assertions = "1.2.1" [features] test-helpers = [] diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index 00a126500e26d..feee22641ef34 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -37,10 +37,10 @@ use sc_transaction_pool_api::{ error::IntoPoolError, BlockHash, InPoolTransaction, TransactionFor, TransactionPool, TransactionSource, TxHash, }; -use sp_api::ProvideRuntimeApi; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::Bytes; -use sp_keystore::KeystorePtr; +use sp_keystore::{KeystoreExt, KeystorePtr}; use sp_runtime::{generic, traits::Block as BlockT}; use sp_session::SessionKeys; @@ -122,8 +122,11 @@ where self.deny_unsafe.check_if_safe()?; let best_block_hash = self.client.info().best_hash; - self.client - .runtime_api() + let mut runtime_api = 
self.client.runtime_api(); + + runtime_api.register_extension(KeystoreExt::from(self.keystore.clone())); + + runtime_api .generate_session_keys(best_block_hash, None) .map(Into::into) .map_err(|api_err| Error::Client(Box::new(api_err)).into()) diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index 1f688e8e85e05..f48b2f9571428 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -66,8 +66,7 @@ struct TestSetup { impl Default for TestSetup { fn default() -> Self { let keystore = Arc::new(MemoryKeystore::new()); - let client_builder = substrate_test_runtime_client::TestClientBuilder::new(); - let client = Arc::new(client_builder.set_keystore(keystore.clone()).build()); + let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new().build()); let spawner = sp_core::testing::TaskExecutor::new(); let pool = diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index f81ec991db133..057661d6ec7f5 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -33,7 +33,7 @@ use jsonrpsee::{ types::SubscriptionResult, }; -use sc_rpc_api::{state::ReadProof, DenyUnsafe}; +use sc_rpc_api::DenyUnsafe; use sp_core::{ storage::{PrefixedStorageKey, StorageChangeSet, StorageData, StorageKey}, Bytes, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 20ca5f7131e71..9604d9165f987 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -198,13 +198,7 @@ where .and_then(|block| { self.client .executor() - .call( - block, - &method, - &call_data, - self.client.execution_extensions().strategies().other, - CallContext::Offchain, - ) + .call(block, &method, &call_data, CallContext::Offchain) .map(Into::into) }) .map_err(client_err) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 9e00a04abe386..35352f6d890ed 100644 --- a/client/rpc/src/state/tests.rs +++ 
b/client/rpc/src/state/tests.rs @@ -518,11 +518,11 @@ async fn should_return_runtime_version() { [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ [\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\ [\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\ - [\"0xed99c5acb25eedf5\",3]],\"transactionVersion\":1,\"stateVersion\":1}"; + [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"stateVersion\":1}"; let runtime_version = api.runtime_version(None.into()).unwrap(); let serialized = serde_json::to_string(&runtime_version).unwrap(); - assert_eq!(serialized, result); + pretty_assertions::assert_eq!(serialized, result); let deserialized: RuntimeVersion = serde_json::from_str(result).unwrap(); assert_eq!(deserialized, runtime_version); diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index b4ce3bbbb7f1c..9ad2fcf778f2d 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -19,7 +19,10 @@ default = ["rocksdb"] rocksdb = ["sc-client-db/rocksdb"] # exposes the client type test-helpers = [] -runtime-benchmarks = ["sc-client-db/runtime-benchmarks"] +runtime-benchmarks = [ + "sc-client-db/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] [dependencies] jsonrpsee = { version = "0.16.2", features = ["server"] } @@ -31,22 +34,22 @@ log = "0.4.17" futures-timer = "3.0.1" exit-future = "0.2.0" pin-project = "1.0.12" -serde = "1.0.136" +serde = "1.0.163" serde_json = "1.0.85" sc-keystore = { version = "4.0.0-dev", path = "../keystore" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } -sp-trie = { version = "7.0.0", path = "../../primitives/trie" } -sp-externalities = { version = "0.13.0", path = "../../primitives/externalities" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } +sp-trie = { version = "22.0.0", path = "../../primitives/trie" } +sp-externalities = { version 
= "0.19.0", path = "../../primitives/externalities" } sc-utils = { version = "4.0.0-dev", path = "../utils" } -sp-version = { version = "5.0.0", path = "../../primitives/version" } +sp-version = { version = "22.0.0", path = "../../primitives/version" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.13.0", path = "../../primitives/keystore" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-keystore = { version = "0.27.0", path = "../../primitives/keystore" } sp-session = { version = "4.0.0-dev", path = "../../primitives/session" } -sp-state-machine = { version = "0.13.0", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.28.0", path = "../../primitives/state-machine" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } -sp-storage = { version = "7.0.0", path = "../../primitives/storage" } +sp-storage = { version = "13.0.0", path = "../../primitives/storage" } sc-network = { version = "0.10.0-dev", path = "../network" } sc-network-bitswap = { version = "0.10.0-dev", path = "../network/bitswap" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } @@ -57,7 +60,7 @@ sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../db" } -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } sc-executor = { version = "0.10.0-dev", path = "../executor" } sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } sp-transaction-pool = { version = "4.0.0-dev", path = 
"../../primitives/transaction-pool" } @@ -69,11 +72,9 @@ sc-rpc-spec-v2 = { version = "0.10.0-dev", path = "../rpc-spec-v2" } sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-informant = { version = "0.10.0-dev", path = "../informant" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } -sc-offchain = { version = "4.0.0-dev", path = "../offchain" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev" } sc-tracing = { version = "4.0.0-dev", path = "../tracing" } sc-sysinfo = { version = "6.0.0-dev", path = "../sysinfo" } -sc-storage-monitor = { version = "0.1.0", path = "../storage-monitor" } tracing = "0.1.29" tracing-futures = { version = "0.2.4" } async-trait = "0.1.57" diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 5237a6166b012..fe18d1d002d56 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -43,6 +43,7 @@ use sc_executor::{ use sc_keystore::LocalKeystore; use sc_network::{ config::{FullNetworkConfiguration, SyncMode}, + peer_store::PeerStore, NetworkService, NetworkStateInfo, NetworkStatusProvider, }; use sc_network_bitswap::BitswapRequestHandler; @@ -73,11 +74,7 @@ use sp_consensus::block_validation::{ use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, BlockIdTo, NumberFor, Zero}; -use std::{ - str::FromStr, - sync::Arc, - time::{Duration, SystemTime}, -}; +use std::{str::FromStr, sync::Arc, time::SystemTime}; /// Full client type. 
pub type TFullClient = @@ -187,9 +184,7 @@ where let client = { let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( - config.execution_strategies.clone(), - Some(keystore_container.keystore()), - sc_offchain::OffchainDb::factory_from_backend(&*backend), + None, Arc::new(executor.clone()), ); @@ -226,7 +221,7 @@ where wasm_runtime_overrides: config.wasm_runtime_overrides.clone(), no_genesis: matches!( config.network.sync_mode, - SyncMode::Fast { .. } | SyncMode::Warp { .. } + SyncMode::LightState { .. } | SyncMode::Warp { .. } ), wasm_runtime_substitutes, }, @@ -322,19 +317,14 @@ where /// Shared network instance implementing a set of mandatory traits. pub trait SpawnTaskNetwork: - sc_offchain::NetworkProvider + NetworkStateInfo + NetworkStatusProvider + Send + Sync + 'static + NetworkStateInfo + NetworkStatusProvider + Send + Sync + 'static { } impl SpawnTaskNetwork for T where Block: BlockT, - T: sc_offchain::NetworkProvider - + NetworkStateInfo - + NetworkStatusProvider - + Send - + Sync - + 'static, + T: NetworkStateInfo + NetworkStatusProvider + Send + Sync + 'static, { } @@ -368,38 +358,6 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub telemetry: Option<&'a mut Telemetry>, } -/// Build a shared offchain workers instance. 
-pub fn build_offchain_workers( - config: &Configuration, - spawn_handle: SpawnTaskHandle, - client: Arc, - network: Arc, -) -> Option>> -where - TBl: BlockT, - TCl: Send + Sync + ProvideRuntimeApi + BlockchainEvents + 'static, - >::Api: sc_offchain::OffchainWorkerApi, -{ - let offchain_workers = Some(Arc::new(sc_offchain::OffchainWorkers::new(client.clone()))); - - // Inform the offchain worker about new imported blocks - if let Some(offchain) = offchain_workers.clone() { - spawn_handle.spawn( - "offchain-notifications", - Some("offchain-worker"), - sc_offchain::notification_future( - config.role.is_authority(), - client, - offchain, - Clone::clone(&spawn_handle), - network, - ), - ); - } - - offchain_workers -} - /// Spawn the tasks that are required to run a node. pub fn spawn_tasks( params: SpawnTasksParams, @@ -420,10 +378,9 @@ where + Send + 'static, >::Api: sp_api::Metadata - + sc_offchain::OffchainWorkerApi + sp_transaction_pool::runtime_api::TaggedTransactionQueue + sp_session::SessionKeys - + sp_api::ApiExt, + + sp_api::ApiExt, TBl: BlockT, TBl::Hash: Unpin, TBl::Header: Unpin, @@ -451,6 +408,7 @@ where client.clone(), chain_info.best_hash, config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), + keystore.clone(), ) .map_err(|e| Error::Application(Box::new(e)))?; @@ -674,24 +632,13 @@ where ) .into_rpc(); - // Maximum pinned blocks across all connections. - // This number is large enough to consider immediate blocks. - // Note: This should never exceed the `PINNING_CACHE_SIZE` from client/db. - const MAX_PINNED_BLOCKS: usize = 512; - - // Any block of any subscription should not be pinned more than - // this constant. When a subscription contains a block older than this, - // the subscription becomes subject to termination. - // Note: This should be enough for immediate blocks. 
- const MAX_PINNED_SECONDS: u64 = 60; - let chain_head_v2 = sc_rpc_spec_v2::chain_head::ChainHead::new( client.clone(), backend.clone(), task_executor.clone(), client.info().genesis_hash, - MAX_PINNED_BLOCKS, - Duration::from_secs(MAX_PINNED_SECONDS), + // Defaults to sensible limits for the `ChainHead`. + sc_rpc_spec_v2::chain_head::ChainHeadConfig::default(), ) .into_rpc(); @@ -749,6 +696,7 @@ pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { /// Optional warp sync params. pub warp_sync_params: Option>, } + /// Build the network service, the network status sinks and an RPC sender. pub fn build_network( params: BuildNetworkParams, @@ -793,7 +741,8 @@ where if client.requires_full_sync() { match config.network.sync_mode { - SyncMode::Fast { .. } => return Err("Fast sync doesn't work for archive nodes".into()), + SyncMode::LightState { .. } => + return Err("Fast sync doesn't work for archive nodes".into()), SyncMode::Warp => return Err("Warp sync doesn't work for archive nodes".into()), SyncMode::Full => {}, } @@ -822,12 +771,14 @@ where }; let (state_request_protocol_config, state_request_protocol_name) = { + let num_peer_hint = net_config.network_config.default_peers_set_num_full as usize + + net_config.network_config.default_peers_set.reserved_nodes.len(); // Allow both outgoing and incoming requests. let (handler, protocol_config) = StateRequestHandler::new( &protocol_id, config.chain_spec.fork_id(), client.clone(), - net_config.network_config.default_peers_set_num_full as usize, + num_peer_hint, ); let config_name = protocol_config.name.clone(); @@ -895,6 +846,18 @@ where ); net_config.add_notification_protocol(transactions_handler_proto.set_config()); + // Create `PeerStore` and initialize it with bootnode peer ids. 
+ let peer_store = PeerStore::new( + net_config + .network_config + .boot_nodes + .iter() + .map(|bootnode| bootnode.peer_id) + .collect(), + ); + let peer_store_handle = peer_store.handle(); + spawn_handle.spawn("peer-store", Some("networking"), peer_store.run()); + let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000); let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let (engine, sync_service, block_announce_config) = SyncingEngine::new( @@ -926,6 +889,7 @@ where }) }, network_config: net_config, + peer_store: peer_store_handle, genesis_hash, protocol_id: protocol_id.clone(), fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 7f83d62874c8e..86b5c7c61fcd2 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -21,15 +21,14 @@ use sc_client_api::{ backend, call_executor::CallExecutor, execution_extensions::ExecutionExtensions, HeaderBackend, }; use sc_executor::{RuntimeVersion, RuntimeVersionOf}; -use sp_api::{ProofRecorder, StorageTransactionCache}; -use sp_core::{ - traits::{CallContext, CodeExecutor, RuntimeCode}, - ExecutionContext, -}; -use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use sp_state_machine::{ - backend::AsTrieBackend, ExecutionStrategy, Ext, OverlayedChanges, StateMachine, StorageProof, +use sp_api::ProofRecorder; +use sp_core::traits::{CallContext, CodeExecutor, RuntimeCode}; +use sp_externalities::Extensions; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashingFor}, }; +use sp_state_machine::{backend::AsTrieBackend, Ext, OverlayedChanges, StateMachine, StorageProof}; use std::{cell::RefCell, sync::Arc}; /// Call executor that executes methods locally, querying all required @@ -123,8 +122,7 @@ where ) -> sp_blockchain::Result { let mut overlay = 
OverlayedChanges::default(); - let mut cache = StorageTransactionCache::::default(); - let mut ext = Ext::new(&mut overlay, &mut cache, state, None); + let mut ext = Ext::new(&mut overlay, state, None); self.executor .runtime_version(&mut ext, code) @@ -166,7 +164,6 @@ where at_hash: Block::Hash, method: &str, call_data: &[u8], - strategy: ExecutionStrategy, context: CallContext, ) -> sp_blockchain::Result> { let mut changes = OverlayedChanges::default(); @@ -180,11 +177,7 @@ where let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; - let extensions = self.execution_extensions.extensions( - at_hash, - at_number, - ExecutionContext::OffchainCall(None), - ); + let mut extensions = self.execution_extensions.extensions(at_hash, at_number); let mut sm = StateMachine::new( &state, @@ -192,14 +185,13 @@ where &self.executor, method, call_data, - extensions, + &mut extensions, &runtime_code, context, ) .set_parent_hash(at_hash); - sm.execute_using_consensus_failure_handler(strategy.get_manager()) - .map_err(Into::into) + sm.execute().map_err(Into::into) } fn contextual_call( @@ -207,25 +199,13 @@ where at_hash: Block::Hash, method: &str, call_data: &[u8], - changes: &RefCell, - storage_transaction_cache: Option<&RefCell>>, + changes: &RefCell>>, recorder: &Option>, - context: ExecutionContext, + call_context: CallContext, + extensions: &RefCell, ) -> Result, sp_blockchain::Error> { - let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); - - let at_number = - self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(at_hash))?; let state = self.backend.state_at(at_hash)?; - let call_context = match context { - ExecutionContext::OffchainCall(_) => CallContext::Offchain, - _ => CallContext::Onchain, - }; - - let (execution_manager, extensions) = - self.execution_extensions.manager_and_extensions(at_hash, at_number, context); - let changes = &mut *changes.borrow_mut(); // It is important to extract the runtime 
code here before we create the proof @@ -236,6 +216,7 @@ where let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; + let mut extensions = extensions.borrow_mut(); match recorder { Some(recorder) => { @@ -251,13 +232,12 @@ where &self.executor, method, call_data, - extensions, + &mut extensions, &runtime_code, call_context, ) - .with_storage_transaction_cache(storage_transaction_cache.as_deref_mut()) .set_parent_hash(at_hash); - state_machine.execute_using_consensus_failure_handler(execution_manager) + state_machine.execute() }, None => { let mut state_machine = StateMachine::new( @@ -266,13 +246,12 @@ where &self.executor, method, call_data, - extensions, + &mut extensions, &runtime_code, call_context, ) - .with_storage_transaction_cache(storage_transaction_cache.as_deref_mut()) .set_parent_hash(at_hash); - state_machine.execute_using_consensus_failure_handler(execution_manager) + state_machine.execute() }, } .map_err(Into::into) @@ -311,11 +290,7 @@ where method, call_data, &runtime_code, - self.execution_extensions.extensions( - at_hash, - at_number, - ExecutionContext::OffchainCall(None), - ), + &mut self.execution_extensions.extensions(at_hash, at_number), ) .map_err(Into::into) } @@ -411,7 +386,6 @@ mod tests { backend.clone(), executor.clone(), genesis_block_builder, - None, Box::new(TaskExecutor::new()), None, None, @@ -430,8 +404,6 @@ mod tests { ) .unwrap(), execution_extensions: Arc::new(ExecutionExtensions::new( - Default::default(), - None, None, Arc::new(executor.clone()), )), @@ -486,7 +458,6 @@ mod tests { backend.clone(), executor.clone(), genesis_block_builder, - None, Box::new(TaskExecutor::new()), None, None, diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 91c59cdbac844..a0983d823e5b1 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -52,7 
+52,7 @@ use sp_api::{ }; use sp_blockchain::{ self as blockchain, Backend as ChainBackend, CachedHeaderMetadata, Error, - HeaderBackend as ChainHeaderBackend, HeaderMetadata, + HeaderBackend as ChainHeaderBackend, HeaderMetadata, Info as BlockchainInfo, }; use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError}; @@ -62,14 +62,12 @@ use sp_core::{ well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData, StorageKey, }, - traits::SpawnNamed, + traits::{CallContext, SpawnNamed}, }; -#[cfg(feature = "test-helpers")] -use sp_keystore::KeystorePtr; use sp_runtime::{ generic::{BlockId, SignedBlock}, traits::{ - Block as BlockT, BlockIdTo, HashFor, Header as HeaderT, NumberFor, One, + Block as BlockT, BlockIdTo, HashingFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, }, Digest, Justification, Justifications, StateVersion, @@ -150,9 +148,9 @@ impl PrePostHeader { } } -enum PrepareStorageChangesResult, Block: BlockT> { +enum PrepareStorageChangesResult { Discard(ImportResult), - Import(Option>>), + Import(Option>), } /// Create an instance of in-memory client. 
@@ -161,7 +159,6 @@ pub fn new_in_mem( backend: Arc>, executor: E, genesis_block_builder: G, - keystore: Option, prometheus_registry: Option, telemetry: Option, spawn_handle: Box, @@ -181,7 +178,6 @@ where backend, executor, genesis_block_builder, - keystore, spawn_handle, prometheus_registry, telemetry, @@ -224,7 +220,6 @@ pub fn new_with_backend( backend: Arc, executor: E, genesis_block_builder: G, - keystore: Option, spawn_handle: Box, prometheus_registry: Option, telemetry: Option, @@ -239,12 +234,7 @@ where Block: BlockT, B: backend::LocalBackend + 'static, { - let extensions = ExecutionExtensions::new( - Default::default(), - keystore, - sc_offchain::OffchainDb::factory_from_backend(&*backend), - Arc::new(executor.clone()), - ); + let extensions = ExecutionExtensions::new(None, Arc::new(executor.clone())); let call_executor = LocalCallExecutor::new(backend.clone(), executor, config.clone(), extensions)?; @@ -499,15 +489,12 @@ where fn apply_block( &self, operation: &mut ClientImportOperation, - import_block: BlockImportParams>, - storage_changes: Option< - sc_consensus::StorageChanges>, - >, + import_block: BlockImportParams, + storage_changes: Option>, ) -> sp_blockchain::Result where Self: ProvideRuntimeApi, - >::Api: - CoreApi + ApiExt, + >::Api: CoreApi + ApiExt, { let BlockImportParams { origin, @@ -590,9 +577,7 @@ where justifications: Option, body: Option>, indexed_body: Option>>, - storage_changes: Option< - sc_consensus::StorageChanges>, - >, + storage_changes: Option>, finalized: bool, aux: Vec<(Vec, Option>)>, fork_choice: ForkChoiceStrategy, @@ -600,8 +585,7 @@ where ) -> sp_blockchain::Result where Self: ProvideRuntimeApi, - >::Api: - CoreApi + ApiExt, + >::Api: CoreApi + ApiExt, { let parent_hash = *import_headers.post().parent_hash(); let status = self.backend.blockchain().status(hash)?; @@ -717,7 +701,7 @@ where operation, parent_hash, None, - info.best_hash, + &info, make_notifications, )?; } @@ -840,12 +824,11 @@ where /// provided, the 
block is re-executed to get the storage changes. fn prepare_block_storage_changes( &self, - import_block: &mut BlockImportParams>, - ) -> sp_blockchain::Result> + import_block: &mut BlockImportParams, + ) -> sp_blockchain::Result> where Self: ProvideRuntimeApi, - >::Api: - CoreApi + ApiExt, + >::Api: CoreApi + ApiExt, { let parent_hash = import_block.header.parent_hash(); let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip); @@ -875,12 +858,12 @@ where // We should enact state, but don't have any storage changes, so we need to execute the // block. (true, None, Some(ref body)) => { - let runtime_api = self.runtime_api(); - let execution_context = import_block.origin.into(); + let mut runtime_api = self.runtime_api(); + + runtime_api.set_call_context(CallContext::Onchain); - runtime_api.execute_block_with_context( + runtime_api.execute_block( *parent_hash, - execution_context, Block::new(import_block.header.clone(), body.clone()), )?; @@ -907,52 +890,56 @@ where fn apply_finality_with_block_hash( &self, operation: &mut ClientImportOperation, - block: Block::Hash, + hash: Block::Hash, justification: Option, - best_block: Block::Hash, + info: &BlockchainInfo, notify: bool, ) -> sp_blockchain::Result<()> { - // find tree route from last finalized to given block. - let last_finalized = self.backend.blockchain().last_finalized()?; - - if block == last_finalized { + if hash == info.finalized_hash { warn!( "Possible safety violation: attempted to re-finalize last finalized block {:?} ", - last_finalized + hash, ); return Ok(()) } + // Find tree route from last finalized to given block. 
let route_from_finalized = - sp_blockchain::tree_route(self.backend.blockchain(), last_finalized, block)?; + sp_blockchain::tree_route(self.backend.blockchain(), info.finalized_hash, hash)?; if let Some(retracted) = route_from_finalized.retracted().get(0) { warn!( "Safety violation: attempted to revert finalized block {:?} which is not in the \ same chain as last finalized {:?}", - retracted, last_finalized + retracted, info.finalized_hash ); return Err(sp_blockchain::Error::NotInFinalizedChain) } - // If there is only one leaf, best block is guaranteed to be - // a descendant of the new finalized block. If not, - // we need to check. - if self.backend.blockchain().leaves()?.len() > 1 { + // We may need to coercively update the best block if there is more than one + // leaf or if the finalized block number is greater than last best number recorded + // by the backend. This last condition may apply in case of consensus implementations + // not always checking this condition. + let block_number = self + .backend + .blockchain() + .number(hash)? + .ok_or(Error::MissingHeader(format!("{hash:?}")))?; + if self.backend.blockchain().leaves()?.len() > 1 || info.best_number < block_number { let route_from_best = - sp_blockchain::tree_route(self.backend.blockchain(), best_block, block)?; + sp_blockchain::tree_route(self.backend.blockchain(), info.best_hash, hash)?; - // if the block is not a direct ancestor of the current best chain, + // If the block is not a direct ancestor of the current best chain, // then some other block is the common ancestor. - if route_from_best.common_block().hash != block { + if route_from_best.common_block().hash != hash { // NOTE: we're setting the finalized block as best block, this might // be slightly inaccurate since we might have a "better" block // further along this chain, but since best chain selection logic is // plugable we cannot make a better choice here. 
usages that need // an accurate "best" block need to go through `SelectChain` // instead. - operation.op.mark_head(block)?; + operation.op.mark_head(hash)?; } } @@ -962,8 +949,8 @@ where operation.op.mark_finalized(finalize_new.hash, None)?; } - assert_eq!(enacted.last().map(|e| e.hash), Some(block)); - operation.op.mark_finalized(block, justification)?; + assert_eq!(enacted.last().map(|e| e.hash), Some(hash)); + operation.op.mark_finalized(hash, justification)?; if notify { let finalized = @@ -985,7 +972,7 @@ where let header = self .backend .blockchain() - .header(block)? + .header(hash)? .expect("Block to finalize expected to be onchain; qed"); operation.notify_finalized = Some(FinalizeSummary { header, finalized, stale_heads }); @@ -1129,7 +1116,7 @@ where } /// Get blockchain info. - pub fn chain_info(&self) -> blockchain::Info { + pub fn chain_info(&self) -> BlockchainInfo { self.backend.blockchain().info() } @@ -1273,11 +1260,11 @@ where // this is a read proof, using version V0 or V1 is equivalent. 
let root = state.storage_root(std::iter::empty(), StateVersion::V0).0; - let (proof, count) = prove_range_read_with_child_with_size::<_, HashFor>( + let (proof, count) = prove_range_read_with_child_with_size::<_, HashingFor>( state, size_limit, start_key, )?; let proof = proof - .into_compact_proof::>(root) + .into_compact_proof::>(root) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?; Ok((proof, count)) } @@ -1400,16 +1387,16 @@ where proof: CompactProof, start_key: &[Vec], ) -> sp_blockchain::Result<(KeyValueStates, usize)> { - let mut db = sp_state_machine::MemoryDB::>::new(&[]); + let mut db = sp_state_machine::MemoryDB::>::new(&[]); // Compact encoding - let _ = sp_trie::decode_compact::>, _, _>( + let _ = sp_trie::decode_compact::>, _, _>( &mut db, proof.iter_compact_encoded_nodes(), Some(&root), ) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?; let proving_backend = sp_state_machine::TrieBackendBuilder::new(db, root).build(); - let state = read_range_proof_check_with_child_on_proving_backend::>( + let state = read_range_proof_check_with_child_on_proving_backend::>( &proving_backend, start_key, )?; @@ -1424,8 +1411,7 @@ where E: CallExecutor + Send + Sync + 'static, Block: BlockT, Self: ChainHeaderBackend + ProvideRuntimeApi, - >::Api: - ApiExt> + BlockBuilderApi, + >::Api: ApiExt + BlockBuilderApi, { fn new_block_at>( &self, @@ -1711,19 +1697,16 @@ where { type StateBackend = B::State; - fn call_api_at( - &self, - params: CallApiAtParams, - ) -> Result, sp_api::ApiError> { + fn call_api_at(&self, params: CallApiAtParams) -> Result, sp_api::ApiError> { self.executor .contextual_call( params.at, params.function, ¶ms.arguments, params.overlayed_changes, - Some(params.storage_transaction_cache), params.recorder, - params.context, + params.call_context, + params.extensions, ) .map_err(Into::into) } @@ -1735,6 +1718,18 @@ where fn state_at(&self, at: Block::Hash) -> Result { self.state_at(at).map_err(Into::into) } + + fn 
initialize_extensions( + &self, + at: Block::Hash, + extensions: &mut sp_externalities::Extensions, + ) -> Result<(), sp_api::ApiError> { + let block_number = self.expect_block_number_from_id(&BlockId::Hash(at))?; + + extensions.merge(self.executor.execution_extensions().extensions(at, block_number)); + + Ok(()) + } } /// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport @@ -1747,13 +1742,10 @@ where E: CallExecutor + Send + Sync, Block: BlockT, Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: - CoreApi + ApiExt, + as ProvideRuntimeApi>::Api: CoreApi + ApiExt, RA: Sync + Send, - backend::TransactionFor: Send + 'static, { type Error = ConsensusError; - type Transaction = backend::TransactionFor; /// Import a checked and validated block. If a justification is provided in /// `BlockImportParams` then `finalized` *must* be true. @@ -1766,7 +1758,7 @@ where /// algorithm, don't use this function. async fn import_block( &mut self, - mut import_block: BlockImportParams>, + mut import_block: BlockImportParams, ) -> Result { let span = tracing::span!(tracing::Level::DEBUG, "import_block"); let _enter = span.enter(); @@ -1860,17 +1852,14 @@ where E: CallExecutor + Send + Sync, Block: BlockT, Self: ProvideRuntimeApi, - >::Api: - CoreApi + ApiExt, + >::Api: CoreApi + ApiExt, RA: Sync + Send, - backend::TransactionFor: Send + 'static, { type Error = ConsensusError; - type Transaction = backend::TransactionFor; async fn import_block( &mut self, - import_block: BlockImportParams, + import_block: BlockImportParams, ) -> Result { (&*self).import_block(import_block).await } @@ -1896,8 +1885,8 @@ where justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { - let last_best = self.backend.blockchain().info().best_hash; - self.apply_finality_with_block_hash(operation, hash, justification, last_best, notify) + let info = self.backend.blockchain().info(); + self.apply_finality_with_block_hash(operation, hash, 
justification, &info, notify) } fn finalize_block( diff --git a/client/service/src/config.rs b/client/service/src/config.rs index c0fb2dc9c4c70..39b7ee0507906 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -18,7 +18,6 @@ //! Service configuration. -pub use sc_client_api::execution_extensions::{ExecutionStrategies, ExecutionStrategy}; pub use sc_client_db::{BlocksPruning, Database, DatabaseSource, PruningMode}; pub use sc_executor::{WasmExecutionMethod, WasmtimeInstantiationStrategy}; pub use sc_network::{ @@ -81,8 +80,6 @@ pub struct Configuration { /// over on-chain runtimes when the spec version matches. Set to `None` to /// disable overrides (default). pub wasm_runtime_overrides: Option, - /// Execution strategies. - pub execution_strategies: ExecutionStrategies, /// JSON-RPC server binding address. pub rpc_addr: Option, /// Maximum number of connections for JSON-RPC server. @@ -234,7 +231,7 @@ impl Configuration { /// Returns true if the genesis state writting will be skipped while initializing the genesis /// block. pub fn no_genesis(&self) -> bool { - matches!(self.network.sync_mode, SyncMode::Fast { .. } | SyncMode::Warp { .. }) + matches!(self.network.sync_mode, SyncMode::LightState { .. } | SyncMode::Warp { .. }) } /// Returns the database config for creating the backend. 
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index e658c7f7d9c71..0961967f9ca20 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -55,8 +55,8 @@ use sp_runtime::{ pub use self::{ builder::{ - build_network, build_offchain_workers, new_client, new_db_backend, new_full_client, - new_full_parts, new_full_parts_with_genesis_builder, new_native_or_wasm_executor, + build_network, new_client, new_db_backend, new_full_client, new_full_parts, + new_full_parts_with_genesis_builder, new_native_or_wasm_executor, new_wasm_executor, spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, SpawnTasksParams, TFullBackend, TFullCallExecutor, TFullClient, }, @@ -211,7 +211,7 @@ async fn build_network_future< } /// Builds a future that processes system RPC requests. -async fn build_system_rpc_future< +pub async fn build_system_rpc_future< B: BlockT, C: BlockchainEvents + HeaderBackend @@ -237,7 +237,7 @@ async fn build_system_rpc_future< // Answer incoming RPC requests. let Some(req) = rpc_rx.next().await else { debug!("RPC requests stream has terminated, shutting down the system RPC future."); - return; + return }; match req { @@ -421,6 +421,13 @@ pub struct TransactionPoolAdapter { client: Arc, } +impl TransactionPoolAdapter { + /// Constructs a new instance of [`TransactionPoolAdapter`]. + pub fn new(pool: Arc

, client: Arc) -> Self { + Self { pool, client } + } +} + /// Get transactions for propagation. /// /// Function extracted to simplify the test and prevent creating `ServiceFactory`. diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 94a844aa7dc0d..dea7bfaa5b7d8 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -13,11 +13,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-channel = "1.8.0" -array-bytes = "4.1" +array-bytes = "6.1" fdlimit = "0.2.1" futures = "0.3.21" log = "0.4.17" -parity-scale-codec = "3.2.2" +parity-scale-codec = "3.6.1" parking_lot = "0.12.1" tempfile = "3.1.0" tokio = { version = "1.22.0", features = ["time"] } @@ -27,19 +27,18 @@ sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../.. sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-executor = { version = "0.10.0-dev", path = "../../executor" } sc-network = { version = "0.10.0-dev", path = "../../network" } -sc-network-common = { version = "0.10.0-dev", path = "../../network/common" } sc-network-sync = { version = "0.10.0-dev", path = "../../network/sync" } sc-service = { version = "0.10.0-dev", features = ["test-helpers"], path = "../../service" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.13.0", path = "../../../primitives/state-machine" } -sp-storage = { version = "7.0.0", path = "../../../primitives/storage" } -sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" 
} -sp-trie = { version = "7.0.0", path = "../../../primitives/trie" } -sp-io = { version = "7.0.0", path = "../../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.28.0", path = "../../../primitives/state-machine" } +sp-storage = { version = "13.0.0", path = "../../../primitives/storage" } +sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } +sp-trie = { version = "22.0.0", path = "../../../primitives/trie" } +sp-io = { version = "23.0.0", path = "../../../primitives/io" } substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index e71e211767f3d..c40ac33da4bb9 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -37,9 +37,7 @@ use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}, ConsensusEngineId, Justifications, StateVersion, }; -use sp_state_machine::{ - backend::Backend as _, ExecutionStrategy, InMemoryBackend, OverlayedChanges, StateMachine, -}; +use sp_state_machine::{backend::Backend as _, InMemoryBackend, OverlayedChanges, StateMachine}; use sp_storage::{ChildInfo, StorageKey}; use sp_trie::{LayoutV0, TrieConfiguration}; use std::{collections::HashSet, sync::Arc}; @@ -90,11 +88,11 @@ fn construct_block( &new_native_or_wasm_executor(), "Core_initialize_block", &header.encode(), - Default::default(), + &mut Default::default(), &runtime_code, CallContext::Onchain, ) - .execute(ExecutionStrategy::NativeElseWasm) + .execute() .unwrap(); for tx in transactions.iter() { @@ -104,11 +102,11 @@ fn construct_block( &new_native_or_wasm_executor(), "BlockBuilder_apply_extrinsic", &tx.encode(), - Default::default(), + 
&mut Default::default(), &runtime_code, CallContext::Onchain, ) - .execute(ExecutionStrategy::NativeElseWasm) + .execute() .unwrap(); } @@ -118,11 +116,11 @@ fn construct_block( &new_native_or_wasm_executor(), "BlockBuilder_finalize_block", &[], - Default::default(), + &mut Default::default(), &runtime_code, CallContext::Onchain, ) - .execute(ExecutionStrategy::NativeElseWasm) + .execute() .unwrap(); header = Header::decode(&mut &ret_data[..]).unwrap(); @@ -189,11 +187,11 @@ fn construct_genesis_should_work_with_native() { &new_native_or_wasm_executor(), "Core_execute_block", &b1data, - Default::default(), + &mut Default::default(), &runtime_code, CallContext::Onchain, ) - .execute(ExecutionStrategy::NativeElseWasm) + .execute() .unwrap(); } @@ -220,11 +218,11 @@ fn construct_genesis_should_work_with_wasm() { &new_native_or_wasm_executor(), "Core_execute_block", &b1data, - Default::default(), + &mut Default::default(), &runtime_code, CallContext::Onchain, ) - .execute(ExecutionStrategy::AlwaysWasm) + .execute() .unwrap(); } @@ -1670,22 +1668,21 @@ fn storage_keys_prefix_and_start_key_works() { let block_hash = client.info().best_hash; - let child_root = b":child_storage:default:child".to_vec(); + let child_root = array_bytes::bytes2hex("", b":child_storage:default:child"); let prefix = StorageKey(array_bytes::hex2bytes_unchecked("3a")); let child_prefix = StorageKey(b"sec".to_vec()); let res: Vec<_> = client .storage_keys(block_hash, Some(&prefix), None) .unwrap() - .map(|x| x.0) + .map(|x| array_bytes::bytes2hex("", &x.0)) .collect(); assert_eq!( res, [ - child_root.clone(), - array_bytes::hex2bytes_unchecked("3a636f6465"), //":code" - array_bytes::hex2bytes_unchecked("3a65787472696e7369635f696e646578"), //":extrinsic_index" - array_bytes::hex2bytes_unchecked("3a686561707061676573"), //":heappages" + &child_root, + "3a636f6465", //":code" + "3a65787472696e7369635f696e646578", //":extrinsic_index" ] ); @@ -1696,15 +1693,9 @@ fn 
storage_keys_prefix_and_start_key_works() { Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))), ) .unwrap() - .map(|x| x.0) + .map(|x| array_bytes::bytes2hex("", &x.0)) .collect(); - assert_eq!( - res, - [ - array_bytes::hex2bytes_unchecked("3a65787472696e7369635f696e646578"), - array_bytes::hex2bytes_unchecked("3a686561707061676573") - ] - ); + assert_eq!(res, ["3a65787472696e7369635f696e646578",]); let res: Vec<_> = client .storage_keys( @@ -1737,7 +1728,7 @@ fn storage_keys_works() { sp_tracing::try_init_simple(); let expected_keys = - substrate_test_runtime::storage_key_generator::get_expected_storage_hashed_keys(); + substrate_test_runtime::storage_key_generator::get_expected_storage_hashed_keys(false); let client = substrate_test_runtime_client::new(); let block_hash = client.info().best_hash; @@ -1776,10 +1767,10 @@ fn storage_keys_works() { res, expected_keys .iter() - .filter(|&i| i > &"3a636f64".to_string()) + .filter(|&i| *i > "3a636f64") .take(8) .cloned() - .collect::>() + .collect::>() ); // Starting at a complete key the first key is skipped. 
@@ -1797,10 +1788,10 @@ fn storage_keys_works() { res, expected_keys .iter() - .filter(|&i| i > &"3a636f6465".to_string()) + .filter(|&i| *i > "3a636f6465") .take(8) .cloned() - .collect::>() + .collect::>() ); const SOME_BALANCE_KEY : &str = "26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9e2c1dc507e2035edbbd8776c440d870460c57f0008067cc01c5ff9eb2e2f9b3a94299a915a91198bd1021a6c55596f57"; @@ -1818,10 +1809,10 @@ fn storage_keys_works() { res, expected_keys .iter() - .filter(|&i| i > &SOME_BALANCE_KEY.to_string()) + .filter(|&i| *i > SOME_BALANCE_KEY) .take(8) .cloned() - .collect::>() + .collect::>() ); } @@ -1850,7 +1841,6 @@ fn cleans_up_closed_notification_sinks_on_block_import() { genesis_block_builder, None, None, - None, Box::new(TaskExecutor::new()), client_config, ) @@ -2001,3 +1991,41 @@ fn use_dalek_ext_works() { .verify_ed25519(a1.hash(), zero_ed_sig(), zero_ed_pub(), vec![]) .unwrap()); } + +#[test] +fn finalize_after_best_block_updates_best() { + let mut client = substrate_test_runtime_client::new(); + + // G -> A1 + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block_at(a1.hash(), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); + + // A2 -> A3 + let a3 = client + .new_block_at(a2.hash(), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + let (header, extrinsics) = a3.clone().deconstruct(); + let mut import_params = BlockImportParams::new(BlockOrigin::Own, header); + import_params.body = Some(extrinsics); + import_params.fork_choice = Some(ForkChoiceStrategy::Custom(false)); + block_on(client.import_block(import_params)).unwrap(); + + assert_eq!(client.chain_info().best_hash, a2.hash()); + + client.finalize_block(a3.hash(), None).unwrap(); + + assert_eq!(client.chain_info().finalized_hash, 
a3.hash()); + assert_eq!(client.chain_info().best_hash, a3.hash()); +} diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 11c672db8cb90..38a811acc7401 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -245,7 +245,6 @@ fn node_config< chain_spec: Box::new((*spec).clone()), wasm_method: Default::default(), wasm_runtime_overrides: Default::default(), - execution_strategies: Default::default(), rpc_addr: Default::default(), rpc_max_connections: Default::default(), rpc_cors: None, diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 13439de110155..f7df4e9c8ce3f 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } log = "0.4.17" parking_lot = "0.12.1" -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } diff --git a/client/statement-store/Cargo.toml b/client/statement-store/Cargo.toml index 936aeb6e3cd82..8668dbfa8ba03 100644 --- a/client/statement-store/Cargo.toml +++ b/client/statement-store/Cargo.toml @@ -13,10 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" -codec = { package = "parity-scale-codec", version = "3.2.2" } -futures = "0.3.21" -futures-timer = "3.0.2" log = "0.4.17" parking_lot = "0.12.1" parity-db = "0.4.8" @@ -25,10 +21,10 @@ sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = 
"../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } sc-client-api = { version = "4.0.0-dev", path = "../api" } +sc-keystore = { version = "4.0.0-dev", path = "../../client/keystore" } [dev-dependencies] tempfile = "3.1.0" diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 2e2bb3bd3b430..da0af08b45402 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -54,12 +54,15 @@ pub use sp_statement_store::{Error, StatementStore, MAX_TOPICS}; use metrics::MetricsLink as PrometheusMetrics; use parking_lot::RwLock; use prometheus_endpoint::Registry as PrometheusRegistry; +use sc_keystore::LocalKeystore; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; -use sp_core::{hexdisplay::HexDisplay, traits::SpawnNamed, Decode, Encode}; +use sp_core::{crypto::UncheckedFrom, hexdisplay::HexDisplay, traits::SpawnNamed, Decode, Encode}; use sp_runtime::traits::Block as BlockT; use sp_statement_store::{ - runtime_api::{InvalidStatement, StatementSource, ValidStatement, ValidateStatement}, + runtime_api::{ + InvalidStatement, StatementSource, StatementStoreExt, ValidStatement, ValidateStatement, + }, AccountId, BlockHash, Channel, DecryptionKey, Hash, NetworkPriority, Proof, Result, Statement, SubmitResult, Topic, }; @@ -199,6 +202,7 @@ pub struct Store { + Send + Sync, >, + keystore: Arc, // Used for testing time_override: Option, metrics: PrometheusMetrics, @@ -236,7 +240,7 @@ impl Index { let priority = Priority(statement.priority().unwrap_or(0)); self.entries.insert(hash, (account, priority, statement.data_len())); self.total_size += statement.data_len(); - let mut account_info = 
self.accounts.entry(account).or_default(); + let account_info = self.accounts.entry(account).or_default(); account_info.data_size += statement.data_len(); if let Some(channel) = statement.channel() { account_info.channels.insert(channel, ChannelEntry { hash, priority }); @@ -477,6 +481,7 @@ impl Store { path: &std::path::Path, options: Options, client: Arc, + keystore: Arc, prometheus: Option<&PrometheusRegistry>, task_spawner: &dyn SpawnNamed, ) -> Result> @@ -491,8 +496,7 @@ impl Store { + 'static, Client::Api: ValidateStatement, { - let store = Arc::new(Self::new(path, options, client.clone(), prometheus)?); - client.execution_extensions().register_statement_store(store.clone()); + let store = Arc::new(Self::new(path, options, client, keystore, prometheus)?); // Perform periodic statement store maintenance let worker_store = store.clone(); @@ -517,6 +521,7 @@ impl Store { path: &std::path::Path, options: Options, client: Arc, + keystore: Arc, prometheus: Option<&PrometheusRegistry>, ) -> Result where @@ -530,7 +535,7 @@ impl Store { let mut config = parity_db::Options::with_columns(&path, col::COUNT); - let mut statement_col = &mut config.columns[col::STATEMENTS as usize]; + let statement_col = &mut config.columns[col::STATEMENTS as usize]; statement_col.ref_counted = false; statement_col.preimage = true; statement_col.uniform = true; @@ -565,6 +570,7 @@ impl Store { db, index: RwLock::new(Index::new(options)), validate_fn, + keystore, time_override: None, metrics: PrometheusMetrics::new(prometheus), }; @@ -696,6 +702,11 @@ impl Store { fn set_time(&mut self, time: u64) { self.time_override = Some(time); } + + /// Returns `self` as [`StatementStoreExt`]. + pub fn as_statement_store_ext(self: Arc) -> StatementStoreExt { + StatementStoreExt::new(self) + } } impl StatementStore for Store { @@ -762,7 +773,45 @@ impl StatementStore for Store { /// Return the decrypted data of all known statements whose decryption key is identified as /// `dest`. 
The key must be available to the client. fn posted_clear(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>> { - self.collect_statements(Some(dest), match_all_topics, |statement| statement.into_data()) + self.collect_statements(Some(dest), match_all_topics, |statement| { + if let (Some(key), Some(_)) = (statement.decryption_key(), statement.data()) { + let public: sp_core::ed25519::Public = UncheckedFrom::unchecked_from(key); + let public: sp_statement_store::ed25519::Public = public.into(); + match self.keystore.key_pair::(&public) { + Err(e) => { + log::debug!( + target: LOG_TARGET, + "Keystore error: {:?}, for statement {:?}", + e, + HexDisplay::from(&statement.hash()) + ); + None + }, + Ok(None) => { + log::debug!( + target: LOG_TARGET, + "Keystore is missing key for statement {:?}", + HexDisplay::from(&statement.hash()) + ); + None + }, + Ok(Some(pair)) => match statement.decrypt_private(&pair.into_inner()) { + Ok(r) => r, + Err(e) => { + log::debug!( + target: LOG_TARGET, + "Decryption error: {:?}, for statement {:?}", + e, + HexDisplay::from(&statement.hash()) + ); + None + }, + }, + } + } else { + None + } + }) } /// Submit a statement to the store. Validates the statement and returns validation result. @@ -881,6 +930,7 @@ impl StatementStore for Store { #[cfg(test)] mod tests { use crate::Store; + use sc_keystore::Keystore; use sp_core::Pair; use sp_statement_store::{ runtime_api::{InvalidStatement, ValidStatement, ValidateStatement}, @@ -910,6 +960,7 @@ mod tests { RuntimeApi { _inner: self.clone() }.into() } } + sp_api::mock_impl_runtime_apis! 
{ impl ValidateStatement for RuntimeApi { fn validate_statement( @@ -977,7 +1028,8 @@ mod tests { let client = std::sync::Arc::new(TestClient); let mut path: std::path::PathBuf = temp_dir.path().into(); path.push("db"); - let store = Store::new(&path, Default::default(), client, None).unwrap(); + let keystore = std::sync::Arc::new(sc_keystore::LocalKeystore::in_memory()); + let store = Store::new(&path, Default::default(), client, keystore, None).unwrap(); (store, temp_dir) // return order is important. Store must be dropped before TempDir } @@ -1080,12 +1132,13 @@ mod tests { assert_eq!(store.statements().unwrap().len(), 3); assert_eq!(store.broadcasts(&[]).unwrap().len(), 3); assert_eq!(store.statement(&statement1.hash()).unwrap(), Some(statement1.clone())); + let keystore = store.keystore.clone(); drop(store); let client = std::sync::Arc::new(TestClient); let mut path: std::path::PathBuf = temp.path().into(); path.push("db"); - let store = Store::new(&path, Default::default(), client, None).unwrap(); + let store = Store::new(&path, Default::default(), client, keystore, None).unwrap(); assert_eq!(store.statements().unwrap().len(), 3); assert_eq!(store.broadcasts(&[]).unwrap().len(), 3); assert_eq!(store.statement(&statement1.hash()).unwrap(), Some(statement1)); @@ -1190,7 +1243,6 @@ mod tests { statement(2, 4, None, 1000).hash(), statement(3, 4, Some(3), 300).hash(), statement(3, 5, None, 500).hash(), - //statement(4, 6, None, 100).hash(), ]; expected_statements.sort(); let mut statements: Vec<_> = @@ -1214,13 +1266,31 @@ mod tests { store.set_time(DEFAULT_PURGE_AFTER_SEC + 1); store.maintain(); assert_eq!(store.index.read().expired.len(), 0); + let keystore = store.keystore.clone(); drop(store); let client = std::sync::Arc::new(TestClient); let mut path: std::path::PathBuf = temp.path().into(); path.push("db"); - let store = Store::new(&path, Default::default(), client, None).unwrap(); + let store = Store::new(&path, Default::default(), client, keystore, 
None).unwrap(); assert_eq!(store.statements().unwrap().len(), 0); assert_eq!(store.index.read().expired.len(), 0); } + + #[test] + fn posted_clear_decrypts() { + let (store, _temp) = test_store(); + let public = store + .keystore + .ed25519_generate_new(sp_core::crypto::key_types::STATEMENT, None) + .unwrap(); + let statement1 = statement(1, 1, None, 100); + let mut statement2 = statement(1, 2, None, 0); + let plain = b"The most valuable secret".to_vec(); + statement2.encrypt(&plain, &public).unwrap(); + store.submit(statement1, StatementSource::Network); + store.submit(statement2, StatementSource::Network); + let posted_clear = store.posted_clear(&[], public.into()).unwrap(); + assert_eq!(posted_clear, vec![plain]); + } } diff --git a/client/storage-monitor/Cargo.toml b/client/storage-monitor/Cargo.toml index 5f682550282d4..02c67dedc59cc 100644 --- a/client/storage-monitor/Cargo.toml +++ b/client/storage-monitor/Cargo.toml @@ -10,11 +10,9 @@ homepage = "https://substrate.io" [dependencies] clap = { version = "4.2.5", features = ["derive", "string"] } -futures = "0.3.21" log = "0.4.17" fs4 = "0.6.3" sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../db" } -sc-utils = { version = "4.0.0-dev", path = "../utils" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } tokio = "1.22.0" thiserror = "1.0.30" diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml index 5447c809c0795..79013cbd5b33a 100644 --- a/client/sync-state-rpc/Cargo.toml +++ b/client/sync-state-rpc/Cargo.toml @@ -12,9 +12,9 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } -serde = { version = "1.0.136", 
features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } serde_json = "1.0.85" thiserror = "1.0.30" sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } @@ -23,4 +23,4 @@ sc-consensus-babe = { version = "0.10.0-dev", path = "../consensus/babe" } sc-consensus-epochs = { version = "0.10.0-dev", path = "../consensus/epochs" } sc-consensus-grandpa = { version = "0.10.0-dev", path = "../consensus/grandpa" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } diff --git a/client/sysinfo/Cargo.toml b/client/sysinfo/Cargo.toml index 73aa7b8d10422..1435511915732 100644 --- a/client/sysinfo/Cargo.toml +++ b/client/sysinfo/Cargo.toml @@ -20,12 +20,12 @@ log = "0.4.17" rand = "0.8.5" rand_pcg = "0.3.1" regex = "1" -serde = { version = "1.0.136", features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } serde_json = "1.0.85" sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } -sp-std = { version = "5.0.0", path = "../../primitives/std" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } +sp-std = { version = "8.0.0", path = "../../primitives/std" } [dev-dependencies] -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 1d4758c7d261b..f138557c8c229 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -22,7 +22,7 @@ parking_lot = "0.12.1" pin-project = "1.0.12" sc-utils = { version = "4.0.0-dev", path = "../utils" } rand = "0.8.5" -serde = { version = 
"1.0.136", features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } serde_json = "1.0.85" thiserror = "1.0.30" wasm-timer = "0.2.5" diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 9312c04e9f6db..fa229782a781e 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -19,24 +19,22 @@ chrono = "0.4.19" lazy_static = "1.4.0" libc = "0.2.121" log = { version = "0.4.17" } -once_cell = "1.8.0" parking_lot = "0.12.1" regex = "1.6.0" rustc-hash = "1.1.0" -serde = "1.0.136" +serde = "1.0.163" thiserror = "1.0.30" tracing = "0.1.29" tracing-log = "0.1.3" tracing-subscriber = { version = "0.2.25", features = ["parking_lot"] } sc-client-api = { version = "4.0.0-dev", path = "../api" } -sc-rpc-server = { version = "4.0.0-dev", path = "../rpc-servers" } sc-tracing-proc-macro = { version = "4.0.0-dev", path = "./proc-macro" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } sp-rpc = { version = "6.0.0", path = "../../primitives/rpc" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } [dev-dependencies] criterion = "0.4.0" diff --git a/client/tracing/proc-macro/Cargo.toml b/client/tracing/proc-macro/Cargo.toml index 54078fe1f2cae..4ae836e608367 100644 --- a/client/tracing/proc-macro/Cargo.toml +++ b/client/tracing/proc-macro/Cargo.toml @@ -17,5 +17,5 @@ proc-macro = true [dependencies] proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" -quote = { version = "1.0.26", features = ["proc-macro"] } -syn = { version = "2.0.14", features = ["proc-macro", "full", 
"extra-traits", "parsing"] } +quote = { version = "1.0.28", features = ["proc-macro"] } +syn = { version = "2.0.16", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index bd5045fed7f11..2107943cf6a5a 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -328,6 +328,10 @@ where } fn on_event(&self, event: &Event<'_>, ctx: Context) { + if !self.check_target(event.metadata().target(), &event.metadata().level()) { + return + } + let parent_id = event.parent().cloned().or_else(|| { if event.is_contextual() { ctx.lookup_current().map(|span| span.id()) diff --git a/client/tracing/src/logging/directives.rs b/client/tracing/src/logging/directives.rs index 3985bf2d88c64..f1caf1a13a2de 100644 --- a/client/tracing/src/logging/directives.rs +++ b/client/tracing/src/logging/directives.rs @@ -14,18 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use once_cell::sync::OnceCell; use parking_lot::Mutex; +use std::sync::OnceLock; use tracing_subscriber::{ filter::Directive, fmt as tracing_fmt, layer, reload::Handle, EnvFilter, Registry, }; // Handle to reload the tracing log filter -static FILTER_RELOAD_HANDLE: OnceCell> = OnceCell::new(); +static FILTER_RELOAD_HANDLE: OnceLock> = OnceLock::new(); // Directives that are defaulted to when resetting the log filter -static DEFAULT_DIRECTIVES: OnceCell>> = OnceCell::new(); +static DEFAULT_DIRECTIVES: OnceLock>> = OnceLock::new(); // Current state of log filter -static CURRENT_DIRECTIVES: OnceCell>> = OnceCell::new(); +static CURRENT_DIRECTIVES: OnceLock>> = OnceLock::new(); /// Add log filter directive(s) to the defaults /// diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 6338f8127aa1c..1718751e6d83c 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -14,14 +14,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.57" -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" futures-timer = "3.0.2" linked-hash-map = "0.5.4" log = "0.4.17" -num-traits = "0.2.8" parking_lot = "0.12.1" -serde = { version = "1.0.136", features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } thiserror = "1.0.30" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-client-api = { version = "4.0.0-dev", path = "../api" } @@ -29,13 +28,13 @@ sc-transaction-pool-api = { version = "4.0.0-dev", path = "./api" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-runtime = { version = 
"7.0.0", path = "../../primitives/runtime" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } sp-transaction-pool = { version = "4.0.0-dev", path = "../../primitives/transaction-pool" } [dev-dependencies] -array-bytes = "4.1" +array-bytes = "6.1" assert_matches = "1.3.0" criterion = "0.4.0" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } diff --git a/client/transaction-pool/api/Cargo.toml b/client/transaction-pool/api/Cargo.toml index e14a3ff4f3839..93efa05415d18 100644 --- a/client/transaction-pool/api/Cargo.toml +++ b/client/transaction-pool/api/Cargo.toml @@ -10,12 +10,14 @@ description = "Transaction pool client facing API." [dependencies] async-trait = "0.1.57" +codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" log = "0.4.17" -serde = { version = "1.0.136", features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } thiserror = "1.0.30" sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } [dev-dependencies] serde_json = "1.0" diff --git a/client/transaction-pool/api/src/lib.rs b/client/transaction-pool/api/src/lib.rs index 428d0aed62efe..32fe30f4584f0 100644 --- a/client/transaction-pool/api/src/lib.rs +++ b/client/transaction-pool/api/src/lib.rs @@ -24,11 +24,12 @@ pub mod error; use async_trait::async_trait; use futures::{Future, Stream}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use 
sp_core::offchain::TransactionPoolExt; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Member, NumberFor}, }; -use std::{collections::HashMap, hash::Hash, pin::Pin, sync::Arc}; +use std::{collections::HashMap, hash::Hash, marker::PhantomData, pin::Pin, sync::Arc}; const LOG_TARGET: &str = "txpool::api"; @@ -305,6 +306,20 @@ pub enum ChainEvent { }, } +impl ChainEvent { + /// Returns the block hash associated to the event. + pub fn hash(&self) -> B::Hash { + match self { + Self::NewBestBlock { hash, .. } | Self::Finalized { hash, .. } => *hash, + } + } + + /// Is `self == Self::Finalized`? + pub fn is_finalized(&self) -> bool { + matches!(self, Self::Finalized { .. }) + } +} + /// Trait for transaction pool maintenance. #[async_trait] pub trait MaintainedTransactionPool: TransactionPool { @@ -329,29 +344,43 @@ pub trait LocalTransactionPool: Send + Sync { /// `TransactionSource::Local`. fn submit_local( &self, - at: &BlockId, + at: ::Hash, xt: LocalTransactionFor, ) -> Result; } -/// An abstraction for transaction pool. +impl LocalTransactionPool for Arc { + type Block = T::Block; + + type Hash = T::Hash; + + type Error = T::Error; + + fn submit_local( + &self, + at: ::Hash, + xt: LocalTransactionFor, + ) -> Result { + (**self).submit_local(at, xt) + } +} + +/// An abstraction for [`LocalTransactionPool`] /// -/// This trait is used by offchain calls to be able to submit transactions. -/// The main use case is for offchain workers, to feed back the results of computations, -/// but since the transaction pool access is a separate `ExternalitiesExtension` it can -/// be also used in context of other offchain calls. For one may generate and submit -/// a transaction for some misbehavior reports (say equivocation). -pub trait OffchainSubmitTransaction: Send + Sync { +/// We want to use a transaction pool in [`OffchainTransactionPoolFactory`] in a `Arc` without +/// bleeding the associated types besides the `Block`. 
Thus, this abstraction here exists to achieve +/// the wrapping in a `Arc`. +trait OffchainSubmitTransaction: Send + Sync { /// Submit transaction. /// /// The transaction will end up in the pool and be propagated to others. - fn submit_at(&self, at: &BlockId, extrinsic: Block::Extrinsic) -> Result<(), ()>; + fn submit_at(&self, at: Block::Hash, extrinsic: Block::Extrinsic) -> Result<(), ()>; } impl OffchainSubmitTransaction for TPool { fn submit_at( &self, - at: &BlockId, + at: ::Hash, extrinsic: ::Extrinsic, ) -> Result<(), ()> { log::debug!( @@ -372,6 +401,54 @@ impl OffchainSubmitTransaction for TP } } +/// Factory for creating [`TransactionPoolExt`]s. +/// +/// This provides an easy way for creating [`TransactionPoolExt`] extensions for registering them in +/// the wasm execution environment to send transactions from an offchain call to the runtime. +#[derive(Clone)] +pub struct OffchainTransactionPoolFactory { + pool: Arc>, +} + +impl OffchainTransactionPoolFactory { + /// Creates a new instance using the given `tx_pool`. + pub fn new + 'static>(tx_pool: T) -> Self { + Self { pool: Arc::new(tx_pool) as Arc<_> } + } + + /// Returns an instance of [`TransactionPoolExt`] bound to the given `block_hash`. + /// + /// Transactions that are being submitted by this instance will be submitted with `block_hash` + /// as context for validation. + pub fn offchain_transaction_pool(&self, block_hash: Block::Hash) -> TransactionPoolExt { + TransactionPoolExt::new(OffchainTransactionPool { pool: self.pool.clone(), block_hash }) + } +} + +/// Wraps a `pool` and `block_hash` to implement [`sp_core::offchain::TransactionPool`]. 
+struct OffchainTransactionPool { + block_hash: Block::Hash, + pool: Arc>, +} + +impl sp_core::offchain::TransactionPool for OffchainTransactionPool { + fn submit_transaction(&mut self, extrinsic: Vec) -> Result<(), ()> { + let extrinsic = match codec::Decode::decode(&mut &extrinsic[..]) { + Ok(t) => t, + Err(e) => { + log::error!( + target: LOG_TARGET, + "Failed to decode extrinsic in `OffchainTransactionPool::submit_transaction`: {e:?}" + ); + + return Err(()) + }, + }; + + self.pool.submit_at(self.block_hash, extrinsic) + } +} + /// Wrapper functions to keep the API backwards compatible over the wire for the old RPC spec. mod v1_compatible { use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -395,6 +472,29 @@ mod v1_compatible { } } +/// Transaction pool that rejects all submitted transactions. +/// +/// Could be used for example in tests. +pub struct RejectAllTxPool(PhantomData); + +impl Default for RejectAllTxPool { + fn default() -> Self { + Self(PhantomData) + } +} + +impl LocalTransactionPool for RejectAllTxPool { + type Block = Block; + + type Hash = Block::Hash; + + type Error = error::Error; + + fn submit_local(&self, _: Block::Hash, _: Block::Extrinsic) -> Result { + Err(error::Error::ImmediatelyDropped) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index f9d79ee429e6c..871d8e9c81707 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -186,7 +186,7 @@ where &self, ex: &graph::ExtrinsicFor, ) -> (graph::ExtrinsicHash, usize) { - ex.using_encoded(|x| ( as traits::Hash>::hash(x), x.len())) + ex.using_encoded(|x| ( as traits::Hash>::hash(x), x.len())) } fn block_header( diff --git a/client/transaction-pool/src/enactment_state.rs b/client/transaction-pool/src/enactment_state.rs index ed6b3d186694f..85c572c127e84 100644 --- a/client/transaction-pool/src/enactment_state.rs +++ 
b/client/transaction-pool/src/enactment_state.rs @@ -19,10 +19,9 @@ //! Substrate transaction pool implementation. use crate::LOG_TARGET; -use num_traits::CheckedSub; use sc_transaction_pool_api::ChainEvent; use sp_blockchain::TreeRoute; -use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, NumberFor, Saturating}; /// The threshold since the last update where we will skip any maintenance for blocks. /// @@ -101,17 +100,16 @@ where TreeRouteF: Fn(Block::Hash, Block::Hash) -> Result, String>, BlockNumberF: Fn(Block::Hash) -> Result>, String>, { - let (new_hash, current_hash, finalized) = match event { - ChainEvent::NewBestBlock { hash, .. } => (*hash, self.recent_best_block, false), - ChainEvent::Finalized { hash, .. } => (*hash, self.recent_finalized_block, true), - }; + let new_hash = event.hash(); + let finalized = event.is_finalized(); // do not proceed with txpool maintain if block distance is to high - let skip_maintenance = match (hash_to_number(new_hash), hash_to_number(current_hash)) { - (Ok(Some(new)), Ok(Some(current))) => - new.checked_sub(¤t) > Some(SKIP_MAINTENANCE_THRESHOLD.into()), - _ => true, - }; + let skip_maintenance = + match (hash_to_number(new_hash), hash_to_number(self.recent_best_block)) { + (Ok(Some(new)), Ok(Some(current))) => + new.saturating_sub(current) > SKIP_MAINTENANCE_THRESHOLD.into(), + _ => true, + }; if skip_maintenance { log::debug!(target: LOG_TARGET, "skip maintain: tree_route would be too long"); @@ -131,10 +129,10 @@ where log::debug!( target: LOG_TARGET, - "resolve hash:{:?} finalized:{:?} tree_route:{:?} best_block:{:?} finalized_block:{:?}", - new_hash, - finalized, - tree_route, + "resolve hash: {new_hash:?} finalized: {finalized:?} \ + tree_route: (common {:?}, last {:?}) best_block: {:?} finalized_block:{:?}", + tree_route.common_block(), + tree_route.last(), self.recent_best_block, self.recent_finalized_block ); diff --git a/client/transaction-pool/src/lib.rs 
b/client/transaction-pool/src/lib.rs index c3a85a373ba31..80e5925194c68 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -396,9 +396,6 @@ where client.usage_info().chain.finalized_hash, )); - // make transaction pool available for off-chain runtime calls. - client.execution_extensions().register_transaction_pool(&pool); - pool } } @@ -421,7 +418,7 @@ where fn submit_local( &self, - at: &BlockId, + at: Block::Hash, xt: sc_transaction_pool_api::LocalTransactionFor, ) -> Result { use sp_runtime::{ @@ -430,7 +427,11 @@ where let validity = self .api - .validate_transaction_blocking(at, TransactionSource::Local, xt.clone())? + .validate_transaction_blocking( + &BlockId::hash(at), + TransactionSource::Local, + xt.clone(), + )? .map_err(|e| { Self::Error::Pool(match e { TransactionValidityError::Invalid(i) => TxPoolError::InvalidTransaction(i), @@ -441,7 +442,7 @@ where let (hash, bytes) = self.pool.validated_pool().api().hash_and_length(&xt); let block_number = self .api - .block_id_to_number(at)? + .block_id_to_number(&BlockId::hash(at))? 
.ok_or_else(|| error::Error::BlockIdConversion(format!("{:?}", at)))?; let validated = ValidatedTransaction::valid_at( diff --git a/client/transaction-pool/tests/pool.rs b/client/transaction-pool/tests/pool.rs index ac029d71700da..4adf811b42521 100644 --- a/client/transaction-pool/tests/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -39,7 +39,7 @@ use sp_runtime::{ }; use std::{collections::BTreeSet, pin::Pin, sync::Arc}; use substrate_test_runtime_client::{ - runtime::{Block, Extrinsic, ExtrinsicBuilder, Hash, Header, Index, Transfer, TransferData}, + runtime::{Block, Extrinsic, ExtrinsicBuilder, Hash, Header, Nonce, Transfer, TransferData}, AccountKeyring::*, ClientBlockImportExt, }; @@ -119,7 +119,7 @@ fn early_nonce_should_be_culled() { .ready() .map(|a| TransferData::try_from(&a.data).unwrap().nonce) .collect(); - assert_eq!(pending, Vec::::new()); + assert_eq!(pending, Vec::::new()); } #[test] @@ -132,7 +132,7 @@ fn late_nonce_should_be_queued() { .ready() .map(|a| TransferData::try_from(&a.data).unwrap().nonce) .collect(); - assert_eq!(pending, Vec::::new()); + assert_eq!(pending, Vec::::new()); block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); let pending: Vec<_> = pool @@ -182,7 +182,7 @@ fn should_ban_invalid_transactions() { .ready() .map(|a| TransferData::try_from(&a.data).unwrap().nonce) .collect(); - assert_eq!(pending, Vec::::new()); + assert_eq!(pending, Vec::::new()); // then block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap_err(); diff --git a/client/utils/Cargo.toml b/client/utils/Cargo.toml index 38484285d3065..40b742f373886 100644 --- a/client/utils/Cargo.toml +++ b/client/utils/Cargo.toml @@ -17,7 +17,7 @@ lazy_static = "1.4.0" log = "0.4" parking_lot = "0.12.1" prometheus = { version = "0.13.0", default-features = false } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-arithmetic = { version = "16.0.0", default-features 
= false, path = "../../primitives/arithmetic" } [features] default = ["metered"] diff --git a/client/utils/src/mpsc.rs b/client/utils/src/mpsc.rs index 7e06bd203b010..36e44be5e2950 100644 --- a/client/utils/src/mpsc.rs +++ b/client/utils/src/mpsc.rs @@ -18,8 +18,10 @@ //! Code to meter unbounded channels. +pub use async_channel::{TryRecvError, TrySendError}; + use crate::metrics::UNBOUNDED_CHANNELS_COUNTER; -use async_channel::{Receiver, Sender, TryRecvError, TrySendError}; +use async_channel::{Receiver, Sender}; use futures::{ stream::{FusedStream, Stream}, task::{Context, Poll}, diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index 0f45a593de0ae..63294d90e9d06 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -35,11 +35,17 @@ /client/sysinfo/ @koute /client/tracing/ @koute +# Documentation audit +/primitives/runtime @paritytech/docs-audit +/primitives/arithmetic @paritytech/docs-audit +# /primitives/core (to be added later) +# /primitives/io (to be added later) + # FRAME -/frame/ @paritytech/frame-coders -/frame/nfts/ @jsidorenko +/frame/ @paritytech/frame-coders @paritytech/docs-audit +/frame/nfts/ @jsidorenko @paritytech/docs-audit /frame/state-trie-migration/ @paritytech/frame-coders @cheme -/frame/uniques/ @jsidorenko +/frame/uniques/ @jsidorenko @paritytech/docs-audit # GRANDPA, BABE, consensus stuff /client/consensus/babe/ @andresilva @@ -57,12 +63,12 @@ /primitives/merkle-mountain-range/ @acatangiu # Contracts -/frame/contracts/ @athei +/frame/contracts/ @athei @paritytech/docs-audit # NPoS and election -/frame/election-provider-multi-phase/ @paritytech/staking-core -/frame/election-provider-support/ @paritytech/staking-core -/frame/elections-phragmen/ @paritytech/staking-core -/frame/nomination-pools/ @paritytech/staking-core -/frame/staking/ @paritytech/staking-core -/primitives/npos-elections/ @paritytech/staking-core +/frame/election-provider-multi-phase/ @paritytech/staking-core @paritytech/docs-audit +/frame/election-provider-support/ 
@paritytech/staking-core @paritytech/docs-audit +/frame/elections-phragmen/ @paritytech/staking-core @paritytech/docs-audit +/frame/nomination-pools/ @paritytech/staking-core @paritytech/docs-audit +/frame/staking/ @paritytech/staking-core @paritytech/docs-audit +/primitives/npos-elections/ @paritytech/staking-core @paritytech/docs-audit diff --git a/docs/CONTRIBUTING.adoc b/docs/CONTRIBUTING.adoc deleted file mode 100644 index 759141a62dfc7..0000000000000 --- a/docs/CONTRIBUTING.adoc +++ /dev/null @@ -1,131 +0,0 @@ -= Contributing - -The `Substrate` project is an **OPENISH Open Source Project** - -== What? - -Individuals making significant and valuable contributions are given commit-access to a project to contribute as they see fit. A project is more like an open wiki than a standard guarded open source project. - -== Rules - -There are a few basic ground-rules for contributors (including the maintainer(s) of the project): - -. **No `--force` pushes** or modifying the master branch history in any way. If you need to rebase, ensure you do it in your own repo. No rewriting of the history after the code has been shared (e.g. through a Pull-Request). -. **Non-master branches**, prefixed with a short name moniker (e.g. `gav-my-feature`) must be used for ongoing work. -. **All modifications** must be made in a **pull-request** to solicit feedback from other contributors. -. A pull-request *must not be merged until CI* has finished successfully. -. Contributors should adhere to the link:STYLE_GUIDE.md[house coding style]. - - -== Merge Process - -*In General* - -A Pull Request (PR) needs to be reviewed and approved by project maintainers unless: - -- it does not alter any logic (e.g. comments, dependencies, docs), then it may be tagged https://github.com/paritytech/substrate/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+label%3AA2-insubstantial[`insubstantial`] and merged by its author once CI is complete. 
-- it is an urgent fix with no large change to logic, then it may be merged after a non-author contributor has approved the review once CI is complete. - -*Labels TLDR:* - -- `A-*` Pull request status. ONE REQUIRED. -- `B-*` Changelog and/or Runtime-upgrade post composition markers. ONE REQUIRED. (used by automation) -- `C-*` Release notes release-criticality markers. EXACTLY ONE REQUIRED. (used by automation) -- `D-*` Audit tags denoting auditing requirements on the PR. - -*Process:* - -. Please tag each PR with exactly one `A`, `B`, `C` and `D` label at the minimum. -. When tagging a PR, it should be done while keeping all downstream users in mind. Downstream users are not just Polkadot or system parachains, but also all the other parachains and solo chains that are using Substrate. The labels are used by downstream users to track changes and to include these changes properly into their own releases. -. Once a PR is ready for review please add the https://github.com/paritytech/substrate/pulls?q=is%3Apr+is%3Aopen+label%3AA0-please_review+[`A0-please_review`] label. Generally PRs should sit with this label for 48 hours in order to garner feedback. It may be merged before if all relevant parties had a look at it. -. If the first review is not an approval, swap `A0-please_review` to any label `[A3, A5]` to indicate that the PR has received some feedback, but needs further work. For example. https://github.com/paritytech/substrate/labels/A3-in_progress[`A3-in_progress`] is a general indicator that the PR is work in progress. -. PRs must be tagged with `B*` labels to signal if a change is note worthy for downstream users. The respective `T*` labels should be added to signal the component that was changed. `B0-silent` must only be used for changes that don't require any attention by downstream users. -. PRs must be tagged with their release importance via the `C1-C7` labels. 
The release importance is only informing about how important it is to apply a release that contains the change. -. PRs must be tagged with their audit requirements via the `D1-D9` labels. -. PRs that introduce runtime migrations must be tagged with https://github.com/paritytech/substrate/labels/E0-runtime_migration[`E0-runtime_migration`]. See the https://github.com/paritytech/substrate/blob/master/utils/frame/try-runtime/cli/src/lib.rs#L18[Migration Best Practices here] for more info about how to test runtime migrations. -. PRs that introduce irreversible database migrations must be tagged with https://github.com/paritytech/substrate/labels/E1-database_migration[`E1-database_migration`]. -. PRs that add host functions must be tagged with with https://github.com/paritytech/substrate/labels/E3-host_functions[`E3-host_functions`]. -. PRs that break the external API must be tagged with https://github.com/paritytech/substrate/labels/F3-breaks_API[`F3-breaks_API`]. -. PRs that change the mechanism for block authoring in a backwards-incompatible way must be tagged with https://github.com/paritytech/substrate/labels/F1-breaks_authoring[`F1-breaks_authoring`]. -. PRs that "break everything" must be tagged with https://github.com/paritytech/substrate/labels/F0-breaks_everything[`F0-breaks_everything`]. -. PRs should be categorized into projects. -. No PR should be merged until all reviews' comments are addressed and CI is successful. - -*Noting relevant changes:* - -When breaking APIs, it should be mentioned on what was changed in the PR description alongside some examples on how to change the code to make it work/compile. - -The PR description should also mention potential storage migrations and if they require some special setup aside adding it to the list of migrations in the runtime. - -*Reviewing pull requests:* - -When reviewing a pull request, the end-goal is to suggest useful changes to the author. 
Reviews should finish with approval unless there are issues that would result in: - -. Buggy behavior. -. Undue maintenance burden. -. Breaking with house coding style. -. Pessimization (i.e. reduction of speed as measured in the projects benchmarks). -. Feature reduction (i.e. it removes some aspect of functionality that a significant minority of users rely on). -. Uselessness (i.e. it does not strictly add a feature or fix a known issue). - -*Reviews may not be used as an effective veto for a PR because*: - -. There exists a somewhat cleaner/better/faster way of accomplishing the same feature/fix. -. It does not fit well with some other contributors' longer-term vision for the project. - -=== Updating Polkadot as well - -**All pull requests will be checked against either Polkadot master, or your provided Polkadot companion PR**. That is, If your PR changes the external APIs or interfaces used by Polkadot. If you tagged the PR with `breaksapi` or `breaksconsensus` this is most certainly the case, in all other cases check for it by running step 1 below. - -To create a Polkadot companion PR: - -. Pull latest Polkadot master (or clone it, if you haven't yet). -. Override substrate deps to point to your local path or branch using https://github.com/bkchr/diener. (E.g. from the polkadot clone dir run `diener patch --crates-to-patch ../substrate --substrate` assuming substrate clone is in a sibling dir. If you do use diener, ensure that you _do not_ commit the changes diener makes to the Cargo.tomls.) -. Make the changes required and build polkadot locally. -. Submit all this as a PR against the Polkadot Repo. -. In the _description_ of your _Substrate_ PR add "polkadot companion: [Polkadot_PR_URL]" -. Now you should see that the `check_polkadot` CI job will build your Substrate PR agains the mentioned Polkadot branch in your PR description. -. Someone will need to approve the Polkadot PR before the Substrate CI will go green. 
(The Polkadot CI failing can be ignored as long as the polkadot job in the _substrate_ PR is green). -. Wait for reviews on both the Substrate and the Polkadot PRs. -. Once the Substrate PR runs green, a member of the `parity` github group can comment on the Substrate PR with `bot merge` which will: - - Merge the Substrate PR. - - The bot will push a commit to the Polkadot PR updating its Substrate reference. (effecively doing `cargo update -p sp-io`) - - If the polkadot PR origins from a fork then a project member may need to press `approve run` on the polkadot PR. - - The bot will merge the Polkadot PR once all its CI `{"build_allow_failure":false}` checks are green. - Note: The merge-bot currently doesn't work with forks on org accounts, only individual accounts. - (Hint: it's recommended to use `bot merge` to merge all substrate PRs, not just ones with a polkadot companion.) - -If your PR is reviewed well, but a Polkadot PR is missing, signal it with https://github.com/paritytech/substrate/labels/E6-needs_polkadot_pr[`E6-needs_polkadot_pr`] to prevent it from getting automatically merged. In most cases the CI will add this label automatically. - -As there might be multiple pending PRs that might conflict with one another, a) you should not merge the substrate PR until the Polkadot PR has also been reviewed and b) both should be merged pretty quickly after another to not block others. - -== Helping out - -We use https://paritytech.github.io/labels/doc_substrate.html[labels] to manage PRs and issues and communicate state of a PR. Please familiarize yourself with them. 
Best way to get started is to a pick a ticket tagged https://github.com/paritytech/substrate/issues?q=is%3Aissue+is%3Aopen+label%3AZ1-easy[`easy`] or https://github.com/paritytech/substrate/issues?q=is%3Aissue+is%3Aopen+label%3AZ2-medium[`medium`] and get going or https://github.com/paritytech/substrate/issues?q=is%3Aissue+is%3Aopen+label%3AZ6-mentor[`mentor`] and get in contact with the mentor offering their support on that larger task. - -== Issues -Please label issues with the following labels: - -. `I-*` or `J-*` Issue severity and type. EXACTLY ONE REQUIRED. -. `U-*` Issue urgency, suggesting in what time manner does this issue need to be resolved. AT MOST ONE ALLOWED. -. `Z-*` Issue difficulty. AT MOST ONE ALLOWED. - -== Releases - -Declaring formal releases remains the prerogative of the project maintainer(s). - -== UI tests - -UI tests are used for macros to ensure that the output of a macro doesn't change and is in the expected format. These UI tests are sensible to any changes -in the macro generated code or to switching the rust stable version. The tests are only run when the `RUN_UI_TESTS` environment variable is set. So, when -the CI is for example complaining about failing UI tests and it is expected that they fail these tests need to be executed locally. To simplify the updating -of the UI test ouput there is the `.maintain/update-rust-stable.sh` script. This can be run with `.maintain/update-rust-stable.sh CURRENT_STABLE_VERSION` -and then it will run all UI tests to update the expected output. - -== Changes to this arrangement - -This is an experiment and feedback is welcome! This document may also be subject to pull-requests or changes by contributors where you believe you have something valuable to add or change. 
- -== Heritage - -These contributing guidelines are modified from the "OPEN Open Source Project" guidelines for the Level project: https://github.com/Level/community/blob/master/CONTRIBUTING.md diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md new file mode 100644 index 0000000000000..cbaf6206e78f2 --- /dev/null +++ b/docs/CONTRIBUTING.md @@ -0,0 +1,134 @@ +# Contributing + +The `Substrate` project is an ***OPENISH Open Source Project*** + +Contributors are invited to our `#frame-contributors` channel on the Polkadot Discord for support and coordination: +[![Discord](https://img.shields.io/discord/722223075629727774?style=for-the-badge&logo=discord&label=Discord)](https://dot.li/discord) + +## What? + +Individuals making significant and valuable contributions are given commit-access to a project to contribute as they see fit. A project is more like an open wiki than a standard guarded open source project. + +## Rules + +There are a few basic ground-rules for contributors (including the maintainer(s) of the project): + +1. ***No `--force` pushes*** or modifying the master branch history in any way. If you need to rebase, ensure you do it in your own repo. No rewriting of the history after the code has been shared (e.g. through a Pull-Request). +2. ***Non-master branches***, prefixed with a short name moniker (e.g. `gav-my-feature`) must be used for ongoing work. +3. ***All modifications*** must be made in a ***pull-request*** to solicit feedback from other contributors. +4. A pull-request **must not be merged until CI** has finished successfully. +5. Contributors should adhere to the [house coding style](STYLE_GUIDE.md). +6. Contributors should adhere to the [house documenting style](DOCUMENTATION_GUIDELINES.md), when applicable. + +## Merge Process + +**In General** + +A Pull Request (PR) needs to be reviewed and approved by project maintainers unless: + +* it does not alter any logic (e.g. 
comments, dependencies, docs), then it may be tagged [`insubstantial`](https://github.com/paritytech/substrate/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+label%3AA2-insubstantial) and merged by its author once CI is complete. +* it is an urgent fix with no large change to logic, then it may be merged after a non-author contributor has approved the review once CI is complete. + +**Labels TLDR:** + +* `A-*` Pull request status. ONE REQUIRED. +* `B-*` Changelog and/or Runtime-upgrade post composition markers. ONE REQUIRED. (used by automation) +* `C-*` Release notes release-criticality markers. EXACTLY ONE REQUIRED. (used by automation) +* `D-*` Audit tags denoting auditing requirements on the PR. + +**Process:** + +1. Please tag each PR with exactly one `A`, `B`, `C` and `D` label at the minimum. +2. When tagging a PR, it should be done while keeping all downstream users in mind. Downstream users are not just Polkadot or system parachains, but also all the other parachains and solo chains that are using Substrate. The labels are used by downstream users to track changes and to include these changes properly into their own releases. +3. Once a PR is ready for review please add the [`A0-please_review`](https://github.com/paritytech/substrate/pulls?q=is%3Apr+is%3Aopen+label%3AA0-please_review+) label. Generally PRs should sit with this label for 48 hours in order to garner feedback. It may be merged before if all relevant parties had a look at it. +4. If the first review is not an approval, swap `A0-please_review` to any label `[A3, A5]` to indicate that the PR has received some feedback, but needs further work. For example. [`A3-in_progress`](https://github.com/paritytech/substrate/labels/A3-in_progress) is a general indicator that the PR is work in progress. +5. PRs must be tagged with `B*` labels to signal if a change is note worthy for downstream users. The respective `T*` labels should be added to signal the component that was changed. 
`B0-silent` must only be used for changes that don’t require any attention by downstream users. +6. PRs must be tagged with their release importance via the `C1-C7` labels. The release importance is only informing about how important it is to apply a release that contains the change. +7. PRs must be tagged with their audit requirements via the `D1-D9` labels. +8. PRs that introduce runtime migrations must be tagged with [`E0-runtime_migration`](https://github.com/paritytech/substrate/labels/E0-runtime_migration). See the [Migration Best Practices here](https://github.com/paritytech/substrate/blob/master/utils/frame/try-runtime/cli/src/lib.rs#L18) for more info about how to test runtime migrations. +9. PRs that introduce irreversible database migrations must be tagged with [`E1-database_migration`](https://github.com/paritytech/substrate/labels/E1-database_migration). +10. PRs that add host functions must be tagged with [`E3-host_functions`](https://github.com/paritytech/substrate/labels/E3-host_functions). +11. PRs that break the external API must be tagged with [`F3-breaks_API`](https://github.com/paritytech/substrate/labels/F3-breaks_API). +12. PRs that change the mechanism for block authoring in a backwards-incompatible way must be tagged with [`F1-breaks_authoring`](https://github.com/paritytech/substrate/labels/F1-breaks_authoring). +13. PRs that "break everything" must be tagged with [`F0-breaks_everything`](https://github.com/paritytech/substrate/labels/F0-breaks_everything). +14. PRs should be categorized into projects. +15. No PR should be merged until all reviews' comments are addressed and CI is successful. + +**Noting relevant changes:** + +When breaking APIs, it should be mentioned what was changed in the PR description alongside some examples on how to change the code to make it work/compile. 
+ +The PR description should also mention potential storage migrations and if they require some special setup aside adding it to the list of migrations in the runtime. + +**Reviewing pull requests:** + +When reviewing a pull request, the end-goal is to suggest useful changes to the author. Reviews should finish with approval unless there are issues that would result in: + +1. Buggy behavior. +2. Undue maintenance burden. +3. Breaking with house coding style. +4. Pessimization (i.e. reduction of speed as measured in the projects benchmarks). +5. Feature reduction (i.e. it removes some aspect of functionality that a significant minority of users rely on). +6. Uselessness (i.e. it does not strictly add a feature or fix a known issue). + +**Reviews may not be used as an effective veto for a PR because**: + +1. There exists a somewhat cleaner/better/faster way of accomplishing the same feature/fix. +2. It does not fit well with some other contributors' longer-term vision for the project. + +### Updating Polkadot as well + +***All pull requests will be checked against either Polkadot master, or your provided Polkadot companion PR***. That is, If your PR changes the external APIs or interfaces used by Polkadot. If you tagged the PR with `breaksapi` or `breaksconsensus` this is most certainly the case, in all other cases check for it by running step 1 below. + +To create a Polkadot companion PR: + +1. Pull latest Polkadot master (or clone it, if you haven’t yet). +2. Override substrate deps to point to your local path or branch using https://github.com/bkchr/diener. (E.g. from the Polkadot clone dir run `diener patch --crates-to-patch ../substrate --substrate` assuming substrate clone is in a sibling dir. If you do use diener, ensure that you _do not_ commit the changes diener makes to the Cargo.tomls.) +3. Make the changes required and build Polkadot locally. +4. Submit all this as a PR against the Polkadot Repo. +5. 
In the _description_ of your _Substrate_ PR add "Polkadot companion: [Polkadot_PR_URL]" +6. Now you should see that the `check_polkadot` CI job will build your Substrate PR against the mentioned Polkadot branch in your PR description. +7. Someone will need to approve the Polkadot PR before the Substrate CI will go green. (The Polkadot CI failing can be ignored as long as the Polkadot job in the _substrate_ PR is green). +8. Wait for reviews on both the Substrate and the Polkadot PRs. +9. Once the Substrate PR runs green, a member of the `parity` Github group can comment on the Substrate PR with `bot merge` which will: + * Merge the Substrate PR. + * The bot will push a commit to the Polkadot PR updating its Substrate reference. (effectively doing `cargo update -p sp-io`) + * If the Polkadot PR origins from a fork then a project member may need to press `approve run` on the Polkadot PR. + * The bot will merge the Polkadot PR once all its CI `{"build_allow_failure":false}` checks are green. + Note: The merge-bot currently doesn’t work with forks on org accounts, only individual accounts. + (Hint: it’s recommended to use `bot merge` to merge all substrate PRs, not just ones with a Polkadot companion.) + +If your PR is reviewed well, but a Polkadot PR is missing, signal it with [`E6-needs_polkadot_pr`](https://github.com/paritytech/substrate/labels/E6-needs_polkadot_pr) to prevent it from getting automatically merged. In most cases the CI will add this label automatically. + +As there might be multiple pending PRs that might conflict with one another, a) you should not merge the substrate PR until the Polkadot PR has also been reviewed and b) both should be merged pretty quickly after another to not block others. + +## Helping out + +We use [labels](https://paritytech.github.io/labels/doc_substrate.html) to manage PRs and issues and communicate state of a PR. Please familiarize yourself with them. 
The best way to get started is to pick a ticket tagged [`easy`](https://github.com/paritytech/substrate/issues?q=is%3Aissue+is%3Aopen+label%3AZ1-easy) or [`medium`](https://github.com/paritytech/substrate/issues?q=is%3Aissue+is%3Aopen+label%3AZ2-medium) and get going or [`mentor`](https://github.com/paritytech/substrate/issues?q=is%3Aissue+is%3Aopen+label%3AZ6-mentor) and get in contact with the mentor offering their support on that larger task. + +## Issues +Please label issues with the following labels: + +1. `I-*` or `J-*` Issue severity and type. EXACTLY ONE REQUIRED. +2. `U-*` Issue urgency, suggesting in what time manner does this issue need to be resolved. AT MOST ONE ALLOWED. +3. `Z-*` Issue difficulty. AT MOST ONE ALLOWED. + +## Releases + +Declaring formal releases remains the prerogative of the project maintainer(s). + +## UI tests + +UI tests are used for macros to ensure that the output of a macro doesn’t change and is in the expected format. These UI tests are sensitive to any changes +in the macro generated code or to switching the rust stable version. The tests are only run when the `RUN_UI_TESTS` environment variable is set. So, when +the CI is for example complaining about failing UI tests and it is expected that they fail, these tests need to be executed locally. To simplify the updating +of the UI test output there is the `.maintain/update-rust-stable.sh` script. This can be run with `.maintain/update-rust-stable.sh CURRENT_STABLE_VERSION` +and then it will run all UI tests to update the expected output. + +## Changes to this arrangement + +This is an experiment and feedback is welcome! This document may also be subject to pull-requests or changes by contributors where you believe you have something valuable to add or change. 
+ +## Heritage + +These contributing guidelines are modified from the "OPEN Open Source Project" guidelines for the Level project: https://github.com/Level/community/blob/master/CONTRIBUTING.md diff --git a/docs/DOCUMENTATION_GUIDELINES.md b/docs/DOCUMENTATION_GUIDELINES.md new file mode 100644 index 0000000000000..0f83f5e6445d5 --- /dev/null +++ b/docs/DOCUMENTATION_GUIDELINES.md @@ -0,0 +1,264 @@ +# Substrate Documentation Guidelines + +This document is only focused on documenting parts of substrate that relates to its external API. The list of such crates can be found in [CODEOWNERS](./CODEOWNERS). Search for the crates that are auto-assigned to a team called `docs-audit`. + +These are crates that are often used by external developers and need more thorough documentation. These are the crates most concerned with FRAME development. + +- [Substrate Documentation Guidelines](#substrate-documentation-guidelines) + - [General/Non-Pallet Crates](#generalnon-pallet-crates) + - [What to Document?](#what-to-document) + - [Rust Docs vs. Code Comments](#rust-docs-vs-code-comments) + - [How to Document?](#how-to-document) + - [TLDR](#tldr) + - [Proc-Macros](#proc-macros) + - [Other Guidelines](#other-guidelines) + - [Document Through Code](#document-through-code) + - [Formatting Matters](#formatting-matters) + - [Pallet Crates](#pallet-crates) + - [Top Level Pallet Docs (`lib.rs`)](#top-level-pallet-docs-librs) + - [Polkadot and Substrate](#polkadot-and-substrate) + - [Dispatchables](#dispatchables) + - [Storage Items](#storage-items) + - [Errors and Events](#errors-and-events) + + +## General/Non-Pallet Crates + +First, consider the case for all such crates, except for those that are pallets. + +### What to Document? + +The first question is, what should you document? Use the following filter: + +1. In the crates assigned to `docs-audit` in [CODEOWNERS](./CODEOWNERS), +2. All `pub` item need to be documented. 
If it is not `pub`, it does not appear in the rust-docs, and is not public facing. + * Within `pub` items, sometimes they are only `pub` in order to be used by another internal crate, and you can foresee that this will not be used by anyone else other than you. These need **not** be documented thoroughly, and are left to your discretion to identify. + * Reminder: `trait` items are public by definition, if the trait is public. +3. All public modules (`mod`) should have reasonable module-level documentation (`//!`). + + +#### Rust Docs vs. Code Comments + +Note that anything starting with `///` is an external rust-doc, and everything starting with `//` does not appear in the rust-docs. It's important to not confuse the two in your documentation. + +```rust +/// Computes the square root of the input, returning `Ok(_)` if successful. +/// +/// # Errors +/// ... +/// +// Details about the complexity, how you implemented this, and some quirks that +// are NOT relevant to the external interface, so it starts with '//'. +// This can also be moved inside the function. +pub fn sqrt(x: u32) -> Result { + todo!(); +} +``` + +### How to Document? + +There are a few very good sources that you can look into: + +- https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html +- https://web.mit.edu/rust-lang_v1.25/arch/amd64_ubuntu1404/share/doc/rust/html/book/first-edition/documentation.html +- https://blog.guillaume-gomez.fr/articles/2020-03-12+Guide+on+how+to+write+documentation+for+a+Rust+crate + +As mentioned [here](https://web.mit.edu/rust-lang_v1.25/arch/amd64_ubuntu1404/share/doc/rust/html/book/first-edition/documentation.html#writing-documentation-comments) and [here](https://blog.guillaume-gomez.fr/articles/2020-03-12+Guide+on+how+to+write+documentation+for+a+Rust+crate), always start with a **single sentence** demonstrating what is being documented. All additional documentation should be added *after a newline*. Strive to make the first sentence succinct and short. 
The reason for this is the first paragraph of docs about an item (everything before the first newline) is used as the excerpt that rust doc displays about this item when it appears in tables, such as the table listing all functions in a module. If this excerpt is too long, the module docs will be very difficult to read. + +About [special sections](https://web.mit.edu/rust-lang_v1.25/arch/amd64_ubuntu1404/share/doc/rust/html/book/first-edition/documentation.html#special-sections), we will most likely not need to think about panic and safety in any runtime related code. Our code is never `unsafe`, and will (almost) never panic. + +Use `# Examples` as much as possible. These are great ways to further demonstrate what your APIs are doing, and add free test coverage. As an additional benefit, any code in rust-docs is treated as "integration tests", not unit tests, which tests your crate in a different way than unit tests. So, it is both a win for "more documentation" and a win for "more test coverage". + +You can also consider having an `# Error` section optionally. Of course, this only applies if there is a `Result` being returned, and if the `Error` variants are overly complicated. + +Strive to include correct links to other items in your written docs as much as possible. In other words, avoid `` `some_func` `` and instead use ``[`some_func`]``. +Read more about how to correctly use links in your rust-docs [here](https://doc.rust-lang.org/rustdoc/write-documentation/linking-to-items-by-name.html#valid-links) and [here](https://rust-lang.github.io/rfcs/1946-intra-rustdoc-links.html#additions-to-the-documentation-syntax). + + +> While you are linking, you might become conscious of the fact that you are in need of linking to (too many) foreign items in order to explain your API. This is leaning more towards API-Design rather than documentation, but it is a warning that the subject API might be slightly wrong. 
For example, most "glue" traits[^1] in `frame/support` should be designed and documented without making hard assumptions about particular pallets that implement them. + +#### TLDR + +0. Have the goal of enforcing `#![deny(missing_docs)]` mentally, even if it is not enforced by the compiler 🙈. +1. Start with a single, clear and concise sentence. Follow up with more context, after a newline, if needed. +2. Use examples as much as reasonably possible. +3. Use links as much as possible. +4. Think about context. If you are explaining a lot of foreign topics while documenting a trait that should not explicitly depend on them, you have likely not designed it properly. + +#### Proc-Macros + +Note that there are special considerations when documenting proc macros. Doc links will appear to function _within_ your proc macro crate, but often will no longer function when these proc macros are re-exported elsewhere in your project. The exception is doc links to _other proc macros_ which will function just fine if they are also being re-exported. It is also often necessary to disambiguate between a proc macro and a function of the same name, which can be done using the `macro@my_macro_name` syntax in your link. Read more about how to correctly use links in your rust-docs [here](https://doc.rust-lang.org/rustdoc/write-documentation/linking-to-items-by-name.html#valid-links) and [here](https://rust-lang.github.io/rfcs/1946-intra-rustdoc-links.html#additions-to-the-documentation-syntax). + + +### Other Guidelines + +The above five guidelines must always be reasonably respected in the documentation. + +The following are a set of notes that may not necessarily hold in all circumstances: + + +#### Document Through Code + +You should make sure that your code is properly-named and well-organized so that your code functions as a form of documentation. However, within the complexity of our projects in Polkadot/Substrate that is not enough. 
Particularly, things like examples, errors and panics cannot be documented only through properly-named and well-organized code. + +> Our north star is self-documenting code that also happens to be well-documented and littered with examples. + + +* Your written documents should *complement* the code, not *repeat* it. As an example, a documentation on top of a code example should never look like the following: + + ```rust + /// Sends request and handles the response. + trait SendRequestAndHandleResponse { + + } + ``` + +In the above example, the documentation has added no useful information not already contained within the properly-named trait and is redundant. + + +#### Formatting Matters + +The way you format your documents (newlines, heading and so on) makes a difference. Consider the below examples: + +```rust +/// This function works with input u32 x and multiplies it by two. If +/// we optimize the other variant of it, we would be able to achieve more +/// efficiency but I have to think about it. Probably can panic if the input +/// overflows u32. +fn multiply_by_2(x: u32) -> u32 { .. } +``` + +```rust +/// Multiplies an input of type [`u32`] by two. +/// +/// # Panics +/// +/// Panics if the input overflows. +/// +/// # Complexity +/// +/// Is implemented using some algorithm that yields complexity of O(1). +// More efficiency can be achieved if we improve this via such and such. +fn multiply_by_2(x: u32) -> u32 { .. } +``` + +They are both roughly conveying the same set of facts, but one is easier to follow because it was formatted cleanly. Especially for traits and types that you can foresee will be seen and used a lot, try and write a well formatted version. + +Similarly, make sure your comments are wrapped at 100 characters line-width (as defined by our [`rustfmt.toml`](../rustfmt.toml)), no **more and no less**! 
The more is fixed by `rustfmt` and our CI, but if you (for some unknown reason) wrap your lines at 59 characters, it will pass the CI, and it will not look good 🫣. Consider using a plugin like [rewrap](https://marketplace.visualstudio.com/items?itemName=stkb.rewrap) (for Visual Studio Code) to properly do this. + +[^1]: Those that help two pallets talk to each other. + +## Pallet Crates + +The guidelines so far have been general in nature, and are applicable to crates that are pallets and crates that're not pallets. + +The following is relevant to how to document parts of a crate that is a pallet. See [`pallet-fast-unstake`](../frame/fast-unstake/src/lib.rs) as one examples of adhering these guidelines. + +### Top Level Pallet Docs (`lib.rs`) + +For the top-level pallet docs, consider the following template: + +``` +//! # +//! +//! . +//! +//! ## Overview +//! +//! +//! +//! +//! +//! +//! +//! ### Example +//! +//! . +//! +//! ## Pallet API +//! +//! +//! +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, including its configuration trait, dispatchables, storage items, events and errors. +//! +//! +//! +//! This section can most often be left as-is. +//! +//! ## Low Level / Implementation Details +//! +//! +//! +//! +//! +//! ### Design Goals (optional) +//! +//! +//! +//! ### Design (optional) +//! +//! +//! +//! ### Terminology (optional) +//! +//! +``` + +This template's details (heading 3s and beyond) are left flexible, and at the discretion of the developer to make the best final choice about. For example, you might want to include `### Terminology` or not. Moreover, you might find it more useful to include it in `## Overview`. + +Nonetheless, the high level flow of going from the most high level explanation to the most low level explanation is important to follow. + +As a rule of thumb, the Heading 2s (`##`) in this template can be considered a strict rule, while the Heading 3s (`###`) and beyond are flexible. 
+ +#### Polkadot and Substrate + +Optionally, in order to demonstrate the relation between the two, you can start the pallet documentation with: + +``` +//! > Made with *Substrate*, for *Polkadot*. +//! +//! [![github]](https://github.com/paritytech/substrate/frame/fast-unstake) - +//! [![polkadot]](https://polkadot.network) +//! +//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +``` + +### Dispatchables + +For each dispatchable (`fn` item inside `#[pallet::call]`), consider the following template: + +``` +/// +/// +/// ## Dispatch Origin +/// +/// The dispatch origin of this call must be

+/// +/// ## Details +/// +/// +/// +/// ## Errors (optional) +/// +/// +/// +/// ## Events (optional) +/// +/// +pub fn name_of_dispatchable(origin: OriginFor, ...) -> DispatchResult {} +``` + +Consider the fact that these docs will be part of the metadata of the associated dispatchable, and might be used by wallets and explorers. + +### Storage Items + +1. If a map-like type is being used, always note the choice of your hashers as private code docs (`// Hasher X chosen because ...`). Recall that this is not relevant information to external people, so it must be documented as `//`. +2. Consider explaining the crypto-economics of how a deposit is being taken in return of the storage being used. +3. Consider explaining why it is safe for the storage item to be unbounded, if `#[pallet::unbounded]` or `#[pallet::without_storage_info]` is being used. + +### Errors and Events + +Consider the fact that, similar to dispatchables, these docs will be part of the metadata of the associated event/error, and might be used by wallets and explorers. + +Specifically for `error`, explain why the error has happened, and what can be done in order to avoid it. diff --git a/docs/PULL_REQUEST_TEMPLATE.md b/docs/PULL_REQUEST_TEMPLATE.md index 12f39371892e7..d2bb22f6e245a 100644 --- a/docs/PULL_REQUEST_TEMPLATE.md +++ b/docs/PULL_REQUEST_TEMPLATE.md @@ -2,35 +2,34 @@ ✄ ----------------------------------------------------------------------------- -Thank you for your Pull Request! 🙏 - -Before you submit, please check that: - -- [ ] **Description:** You added a brief description of the PR, e.g.: - - What does it do? - - What important points should reviewers know? - - Is there something left for follow-up PRs? 
-- [ ] **Labels:** You labeled the PR appropriately if you have permissions to do so: - - [ ] `A*` for PR status (**one required**) - - [ ] `B*` for changelog (**one required**) - - [ ] `C*` for release notes (**exactly one required**) - - [ ] `D*` for various implications/requirements - - [ ] Github project assignment -- [ ] **Related Issues:** You mentioned a related issue if this PR is related to it, e.g. `Fixes #228` or `Related #1337`. -- [ ] **2 Reviewers:** You asked at least two reviewers to review. If you aren't sure, start with GH suggestions. -- [ ] **Style Guide:** Your PR adheres to [the style guide](https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md) - - In particular, mind the maximal line length of 100 (120 in exceptional circumstances). - - There is no commented code checked in unless necessary. - - Any panickers in the runtime have a proof or were removed. -- [ ] **Runtime Version:** You bumped the runtime version if there are breaking changes in the **runtime**. -- [ ] **Docs:** You updated any rustdocs which may need to change. -- [ ] **Polkadot Companion:** Has the PR altered the external API or interfaces used by Polkadot? - - [ ] If so, do you have the corresponding Polkadot PR ready? - - [ ] Optionally: Do you have a corresponding Cumulus PR? - -Refer to [the contributing guide](https://github.com/paritytech/substrate/blob/master/docs/CONTRIBUTING.adoc) for details. - -After you've read this notice feel free to remove it. -Thank you! +Thank you for your Pull Request! 🙏 Please make sure it follows the contribution guidelines outlined in [this document](https://github.com/paritytech/substrate/blob/master/docs/CONTRIBUTING.md) and fill out the sections below. Once you're ready to submit your PR for review, please delete this section and leave only the text under the "Description" heading. + +# Description + +*Please include a summary of the changes and the related issue. 
Please also include relevant motivation and context, including:* + +- What does this PR do? +- Why are these changes needed? +- How were these changes implemented and what do they affect? + +*Use [Github semantic linking](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) to address any open issues this PR relates to or closes.* + +Fixes # (issue number, *if applicable*) + +Closes # (issue number, *if applicable*) + +Polkadot companion: (*if applicable*) + +Cumulus companion: (*if applicable*) + +# Checklist + +- [ ] My PR includes a detailed description as outlined in the "Description" section above +- [ ] My PR follows the [labeling requirements](https://github.com/paritytech/substrate/blob/master/docs/CONTRIBUTING.md#merge-process) of this project (at minimum one label for each `A`, `B`, `C` and `D` required) +- [ ] I have made corresponding changes to the documentation (if applicable) +- [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) +- [ ] If this PR alters any external APIs or interfaces used by Polkadot, the corresponding Polkadot PR is ready as well as the corresponding Cumulus PR (optional) + +You can remove the "Checklist" section once all have been checked. Thank you for your contribution! 
✄ ----------------------------------------------------------------------------- diff --git a/docs/README.adoc b/docs/README.adoc index 8e43757d10fb2..3537e346a66e1 100644 --- a/docs/README.adoc +++ b/docs/README.adoc @@ -511,7 +511,7 @@ The code block annotations in the `# Example` section may be used as https://doc === Contributing Guidelines -include::CONTRIBUTING.adoc[] +include::CONTRIBUTING.md[] === Contributor Code of Conduct diff --git a/frame/alliance/Cargo.toml b/frame/alliance/Cargo.toml index d70dfd6d752eb..d0330ddfd6721 100644 --- a/frame/alliance/Cargo.toml +++ b/frame/alliance/Cargo.toml @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = { version = "4.1", optional = true } +array-bytes = { version = "6.1", optional = true } log = { version = "0.4.14", default-features = false } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-core-hashing = { version = "5.0.0", default-features = false, path = "../../primitives/core/hashing", optional = true } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-core-hashing = { version = "9.0.0", default-features = false, path = "../../primitives/core/hashing", optional = true } +sp-io = { version = "23.0.0", 
default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } @@ -33,8 +33,8 @@ pallet-identity = { version = "4.0.0-dev", path = "../identity", default-feature pallet-collective = { version = "4.0.0-dev", path = "../collective", default-features = false, optional = true } [dev-dependencies] -array-bytes = "4.1" -sp-core-hashing = { version = "5.0.0", default-features = false, path = "../../primitives/core/hashing" } +array-bytes = "6.1" +sp-core-hashing = { version = "9.0.0", default-features = false, path = "../../primitives/core/hashing" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-collective = { version = "4.0.0-dev", path = "../collective" } @@ -54,6 +54,7 @@ std = [ "frame-support/std", "frame-system/std", "pallet-identity/std", + "pallet-balances/std" ] runtime-benchmarks = [ "array-bytes", @@ -64,8 +65,13 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-collective/runtime-benchmarks", "pallet-identity/runtime-benchmarks", + "pallet-balances/runtime-benchmarks" ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-collective?/try-runtime", + "pallet-identity/try-runtime", + "sp-runtime/try-runtime" ] diff --git a/frame/alliance/src/benchmarking.rs b/frame/alliance/src/benchmarking.rs index 92bf1ae4468df..eb32c6c466c91 100644 --- a/frame/alliance/src/benchmarking.rs +++ b/frame/alliance/src/benchmarking.rs @@ -27,7 +27,7 @@ use sp_std::{ use frame_benchmarking::v1::{account, benchmarks_instance_pallet, BenchmarkError}; use frame_support::traits::{EnsureOrigin, Get, UnfilteredDispatchable}; -use frame_system::{Pallet as System, RawOrigin as SystemOrigin}; 
+use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System, RawOrigin as SystemOrigin}; use super::{Call as AllianceCall, Pallet as Alliance, *}; @@ -432,7 +432,7 @@ benchmarks_instance_pallet! { false, )?; - System::::set_block_number(T::BlockNumber::max_value()); + System::::set_block_number(BlockNumberFor::::max_value()); }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) verify { @@ -504,7 +504,7 @@ benchmarks_instance_pallet! { } // caller is prime, prime already votes aye by creating the proposal - System::::set_block_number(T::BlockNumber::max_value()); + System::::set_block_number(BlockNumberFor::::max_value()); }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) verify { diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 6c034454cf7c0..c103f975f23be 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -309,7 +309,7 @@ pub mod pallet { /// The number of blocks a member must wait between giving a retirement notice and retiring. /// Supposed to be greater than time required to `kick_member`. 
- type RetirementPeriod: Get; + type RetirementPeriod: Get>; } #[pallet::error] @@ -405,11 +405,12 @@ pub mod pallet { pub struct GenesisConfig, I: 'static = ()> { pub fellows: Vec, pub allies: Vec, + #[serde(skip)] pub phantom: PhantomData<(T, I)>, } #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { + impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) { for m in self.fellows.iter().chain(self.allies.iter()) { assert!(Pallet::::has_identity(m).is_ok(), "Member does not set identity!"); @@ -476,7 +477,7 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn retiring_members)] pub type RetiringMembers, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, T::AccountId, T::BlockNumber, OptionQuery>; + StorageMap<_, Blake2_128Concat, T::AccountId, BlockNumberFor, OptionQuery>; /// The current list of accounts deemed unscrupulous. These accounts non grata cannot submit /// candidacy. diff --git a/frame/alliance/src/mock.rs b/frame/alliance/src/mock.rs index c334a3943b025..f04e7e414ed94 100644 --- a/frame/alliance/src/mock.rs +++ b/frame/alliance/src/mock.rs @@ -20,7 +20,6 @@ pub use sp_core::H256; use sp_runtime::traits::Hash; pub use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; @@ -28,7 +27,7 @@ use sp_std::convert::{TryFrom, TryInto}; pub use frame_support::{ assert_noop, assert_ok, ord_parameter_types, parameter_types, - traits::{EitherOfDiverse, GenesisBuild, SortedMembers}, + traits::{EitherOfDiverse, SortedMembers}, BoundedVec, }; use frame_system::{EnsureRoot, EnsureSignedBy}; @@ -52,13 +51,12 @@ impl frame_system::Config for Test { type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = BlockNumber; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = 
RuntimeEvent; type BlockHashCount = BlockHashCount; type DbWeight = (); @@ -89,7 +87,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -238,14 +236,10 @@ impl Config for Test { type RetirementPeriod = RetirementPeriod; } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, Balances: pallet_balances, @@ -256,7 +250,7 @@ frame_support::construct_runtime!( ); pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![ @@ -274,14 +268,12 @@ pub fn new_test_ext() -> sp_io::TestExternalities { .assimilate_storage(&mut t) .unwrap(); - GenesisBuild::::assimilate_storage( - &pallet_alliance::GenesisConfig { - fellows: vec![], - allies: vec![], - phantom: Default::default(), - }, - &mut t, - ) + pallet_alliance::GenesisConfig:: { + fellows: vec![], + allies: vec![], + phantom: Default::default(), + } + .assimilate_storage(&mut t) .unwrap(); let mut ext = sp_io::TestExternalities::new(t); @@ -372,7 +364,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { #[cfg(feature = "runtime-benchmarks")] pub fn new_bench_ext() -> sp_io::TestExternalities { - GenesisConfig::default().build_storage().unwrap().into() + RuntimeGenesisConfig::default().build_storage().unwrap().into() } pub fn test_cid() -> Cid { diff --git a/frame/alliance/src/tests.rs b/frame/alliance/src/tests.rs index de7cda4710fc7..098fd86bbae1e 100644 --- a/frame/alliance/src/tests.rs +++ 
b/frame/alliance/src/tests.rs @@ -299,7 +299,7 @@ fn close_works() { })), record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Executed { proposal_hash: hash, - result: Err(DispatchError::BadOrigin), + result: Ok(()), })) ] ); diff --git a/frame/alliance/src/weights.rs b/frame/alliance/src/weights.rs index 0276a9844a4a9..b5bb50957207f 100644 --- a/frame/alliance/src/weights.rs +++ b/frame/alliance/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_alliance //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_alliance +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_alliance. pub trait WeightInfo { @@ -88,20 +92,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `618 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `6676 + m * (31 ±0) + p * (37 ±0)` - // Minimum execution time: 32_316_000 picoseconds. 
- Weight::from_parts(35_185_484, 6676) - // Standard Error: 83 - .saturating_add(Weight::from_parts(1_089, 0).saturating_mul(b.into())) - // Standard Error: 871 - .saturating_add(Weight::from_parts(21_235, 0).saturating_mul(m.into())) - // Standard Error: 860 - .saturating_add(Weight::from_parts(120_353, 0).saturating_mul(p.into())) + // Measured: `653 + m * (32 ±0) + p * (35 ±0)` + // Estimated: `6676 + m * (32 ±0) + p * (36 ±0)` + // Minimum execution time: 36_908_000 picoseconds. + Weight::from_parts(39_040_304, 6676) + // Standard Error: 131 + .saturating_add(Weight::from_parts(781, 0).saturating_mul(b.into())) + // Standard Error: 1_375 + .saturating_add(Weight::from_parts(48_745, 0).saturating_mul(m.into())) + // Standard Error: 1_358 + .saturating_add(Weight::from_parts(148_047, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 31).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -112,10 +116,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1042 + m * (64 ±0)` // Estimated: `6676 + m * (64 ±0)` - // Minimum execution time: 25_982_000 picoseconds. - Weight::from_parts(28_118_657, 6676) - // Standard Error: 855 - .saturating_add(Weight::from_parts(61_309, 0).saturating_mul(m.into())) + // Minimum execution time: 30_166_000 picoseconds. 
+ Weight::from_parts(32_798_454, 6676) + // Standard Error: 1_432 + .saturating_add(Weight::from_parts(83_001, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -134,18 +138,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `622 + m * (96 ±0) + p * (37 ±0)` - // Estimated: `6676 + m * (96 ±0) + p * (37 ±0)` - // Minimum execution time: 40_922_000 picoseconds. - Weight::from_parts(39_098_903, 6676) - // Standard Error: 714 - .saturating_add(Weight::from_parts(44_125, 0).saturating_mul(m.into())) - // Standard Error: 696 - .saturating_add(Weight::from_parts(111_263, 0).saturating_mul(p.into())) + // Measured: `576 + m * (96 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` + // Minimum execution time: 45_173_000 picoseconds. + Weight::from_parts(42_192_020, 6676) + // Standard Error: 1_456 + .saturating_add(Weight::from_parts(66_751, 0).saturating_mul(m.into())) + // Standard Error: 1_420 + .saturating_add(Weight::from_parts(158_161, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 96).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -162,19 +166,19 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. 
fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `903 + m * (96 ±0) + p * (41 ±0)` - // Estimated: `6676 + m * (98 ±0) + p * (40 ±0)` - // Minimum execution time: 51_890_000 picoseconds. - Weight::from_parts(49_880_817, 6676) - // Standard Error: 81 - .saturating_add(Weight::from_parts(688, 0).saturating_mul(b.into())) - // Standard Error: 862 - .saturating_add(Weight::from_parts(54_419, 0).saturating_mul(m.into())) - // Standard Error: 840 - .saturating_add(Weight::from_parts(122_253, 0).saturating_mul(p.into())) + // Measured: `1087 + m * (96 ±0) + p * (39 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (40 ±0)` + // Minimum execution time: 58_290_000 picoseconds. + Weight::from_parts(54_924_919, 6676) + // Standard Error: 157 + .saturating_add(Weight::from_parts(464, 0).saturating_mul(b.into())) + // Standard Error: 1_665 + .saturating_add(Weight::from_parts(73_183, 0).saturating_mul(m.into())) + // Standard Error: 1_623 + .saturating_add(Weight::from_parts(168_318, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 98).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) @@ -193,18 +197,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `622 + m * (96 ±0) + p * (37 ±0)` - // Estimated: `6676 + m * (96 ±0) + p * (37 ±0)` - // Minimum execution time: 42_391_000 picoseconds. 
- Weight::from_parts(40_156_254, 6676) - // Standard Error: 728 - .saturating_add(Weight::from_parts(47_889, 0).saturating_mul(m.into())) - // Standard Error: 719 - .saturating_add(Weight::from_parts(112_596, 0).saturating_mul(p.into())) + // Measured: `577 + m * (96 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` + // Minimum execution time: 46_794_000 picoseconds. + Weight::from_parts(43_092_958, 6676) + // Standard Error: 1_273 + .saturating_add(Weight::from_parts(71_054, 0).saturating_mul(m.into())) + // Standard Error: 1_257 + .saturating_add(Weight::from_parts(152_820, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 96).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -223,16 +227,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `591 + m * (96 ±0) + p * (36 ±0)` + // Measured: `684 + m * (96 ±0) + p * (35 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 42_320_000 picoseconds. - Weight::from_parts(40_205_526, 6676) - // Standard Error: 64 - .saturating_add(Weight::from_parts(49, 0).saturating_mul(b.into())) - // Standard Error: 690 - .saturating_add(Weight::from_parts(46_508, 0).saturating_mul(m.into())) - // Standard Error: 665 - .saturating_add(Weight::from_parts(112_222, 0).saturating_mul(p.into())) + // Minimum execution time: 47_338_000 picoseconds. 
+ Weight::from_parts(41_257_479, 6676) + // Standard Error: 119 + .saturating_add(Weight::from_parts(1_019, 0).saturating_mul(b.into())) + // Standard Error: 1_277 + .saturating_add(Weight::from_parts(78_453, 0).saturating_mul(m.into())) + // Standard Error: 1_231 + .saturating_add(Weight::from_parts(150_991, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -248,12 +252,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `217` // Estimated: `12362` - // Minimum execution time: 32_509_000 picoseconds. - Weight::from_parts(23_584_337, 12362) - // Standard Error: 377 - .saturating_add(Weight::from_parts(114_917, 0).saturating_mul(m.into())) - // Standard Error: 373 - .saturating_add(Weight::from_parts(97_593, 0).saturating_mul(z.into())) + // Minimum execution time: 35_012_000 picoseconds. + Weight::from_parts(24_288_079, 12362) + // Standard Error: 878 + .saturating_add(Weight::from_parts(153_615, 0).saturating_mul(m.into())) + // Standard Error: 867 + .saturating_add(Weight::from_parts(129_307, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -276,14 +280,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (251 ±0)` // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` - // Minimum execution time: 275_061_000 picoseconds. 
- Weight::from_parts(565_248, 12362) - // Standard Error: 15_948 - .saturating_add(Weight::from_parts(1_636_348, 0).saturating_mul(x.into())) - // Standard Error: 15_761 - .saturating_add(Weight::from_parts(1_580_146, 0).saturating_mul(y.into())) - // Standard Error: 31_496 - .saturating_add(Weight::from_parts(17_217_382, 0).saturating_mul(z.into())) + // Minimum execution time: 309_235_000 picoseconds. + Weight::from_parts(311_279_000, 12362) + // Standard Error: 26_510 + .saturating_add(Weight::from_parts(543_475, 0).saturating_mul(x.into())) + // Standard Error: 26_382 + .saturating_add(Weight::from_parts(603_169, 0).saturating_mul(y.into())) + // Standard Error: 52_716 + .saturating_add(Weight::from_parts(16_264_836, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) @@ -300,8 +304,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_261_000 picoseconds. - Weight::from_parts(10_389_000, 0) + // Minimum execution time: 8_833_000 picoseconds. + Weight::from_parts(9_313_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Alliance Announcements (r:1 w:1) @@ -310,8 +314,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `246` // Estimated: `10187` - // Minimum execution time: 13_483_000 picoseconds. - Weight::from_parts(13_805_000, 10187) + // Minimum execution time: 12_231_000 picoseconds. + Weight::from_parts(12_761_000, 10187) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -321,8 +325,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `319` // Estimated: `10187` - // Minimum execution time: 14_816_000 picoseconds. 
- Weight::from_parts(15_163_000, 10187) + // Minimum execution time: 13_079_000 picoseconds. + Weight::from_parts(13_612_000, 10187) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -338,8 +342,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `468` // Estimated: `18048` - // Minimum execution time: 46_149_000 picoseconds. - Weight::from_parts(46_827_000, 18048) + // Minimum execution time: 44_574_000 picoseconds. + Weight::from_parts(46_157_000, 18048) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -351,8 +355,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `367` // Estimated: `18048` - // Minimum execution time: 28_463_000 picoseconds. - Weight::from_parts(28_730_000, 18048) + // Minimum execution time: 26_114_000 picoseconds. + Weight::from_parts(27_069_000, 18048) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -368,8 +372,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `443` // Estimated: `12362` - // Minimum execution time: 28_401_000 picoseconds. - Weight::from_parts(28_717_000, 12362) + // Minimum execution time: 25_882_000 picoseconds. + Weight::from_parts(26_923_000, 12362) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -387,8 +391,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `443` // Estimated: `23734` - // Minimum execution time: 36_538_000 picoseconds. - Weight::from_parts(37_197_000, 23734) + // Minimum execution time: 34_112_000 picoseconds. 
+ Weight::from_parts(35_499_000, 23734) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -404,8 +408,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `687` // Estimated: `6676` - // Minimum execution time: 42_324_000 picoseconds. - Weight::from_parts(42_890_000, 6676) + // Minimum execution time: 41_239_000 picoseconds. + Weight::from_parts(42_764_000, 6676) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -425,8 +429,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `707` // Estimated: `18048` - // Minimum execution time: 68_003_000 picoseconds. - Weight::from_parts(68_657_000, 18048) + // Minimum execution time: 68_071_000 picoseconds. + Weight::from_parts(71_808_000, 18048) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -440,12 +444,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `246` // Estimated: `27187` - // Minimum execution time: 8_304_000 picoseconds. - Weight::from_parts(8_424_000, 27187) - // Standard Error: 2_765 - .saturating_add(Weight::from_parts(1_529_793, 0).saturating_mul(n.into())) - // Standard Error: 1_082 - .saturating_add(Weight::from_parts(71_352, 0).saturating_mul(l.into())) + // Minimum execution time: 7_006_000 picoseconds. 
+ Weight::from_parts(7_253_000, 27187) + // Standard Error: 3_403 + .saturating_add(Weight::from_parts(1_680_082, 0).saturating_mul(n.into())) + // Standard Error: 1_333 + .saturating_add(Weight::from_parts(72_943, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -459,12 +463,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + l * (100 ±0) + n * (289 ±0)` // Estimated: `27187` - // Minimum execution time: 8_348_000 picoseconds. - Weight::from_parts(8_505_000, 27187) - // Standard Error: 187_398 - .saturating_add(Weight::from_parts(16_545_597, 0).saturating_mul(n.into())) - // Standard Error: 73_393 - .saturating_add(Weight::from_parts(350_415, 0).saturating_mul(l.into())) + // Minimum execution time: 7_292_000 picoseconds. + Weight::from_parts(7_629_000, 27187) + // Standard Error: 176_225 + .saturating_add(Weight::from_parts(16_646_429, 0).saturating_mul(n.into())) + // Standard Error: 69_017 + .saturating_add(Weight::from_parts(310_978, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -480,8 +484,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `443` // Estimated: `18048` - // Minimum execution time: 34_461_000 picoseconds. - Weight::from_parts(34_992_000, 18048) + // Minimum execution time: 31_798_000 picoseconds. + Weight::from_parts(33_463_000, 18048) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -504,20 +508,20 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `618 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `6676 + m * (31 ±0) + p * (37 ±0)` - // Minimum execution time: 32_316_000 picoseconds. 
- Weight::from_parts(35_185_484, 6676) - // Standard Error: 83 - .saturating_add(Weight::from_parts(1_089, 0).saturating_mul(b.into())) - // Standard Error: 871 - .saturating_add(Weight::from_parts(21_235, 0).saturating_mul(m.into())) - // Standard Error: 860 - .saturating_add(Weight::from_parts(120_353, 0).saturating_mul(p.into())) + // Measured: `653 + m * (32 ±0) + p * (35 ±0)` + // Estimated: `6676 + m * (32 ±0) + p * (36 ±0)` + // Minimum execution time: 36_908_000 picoseconds. + Weight::from_parts(39_040_304, 6676) + // Standard Error: 131 + .saturating_add(Weight::from_parts(781, 0).saturating_mul(b.into())) + // Standard Error: 1_375 + .saturating_add(Weight::from_parts(48_745, 0).saturating_mul(m.into())) + // Standard Error: 1_358 + .saturating_add(Weight::from_parts(148_047, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 31).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -528,10 +532,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1042 + m * (64 ±0)` // Estimated: `6676 + m * (64 ±0)` - // Minimum execution time: 25_982_000 picoseconds. - Weight::from_parts(28_118_657, 6676) - // Standard Error: 855 - .saturating_add(Weight::from_parts(61_309, 0).saturating_mul(m.into())) + // Minimum execution time: 30_166_000 picoseconds. 
+ Weight::from_parts(32_798_454, 6676) + // Standard Error: 1_432 + .saturating_add(Weight::from_parts(83_001, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -550,18 +554,18 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `622 + m * (96 ±0) + p * (37 ±0)` - // Estimated: `6676 + m * (96 ±0) + p * (37 ±0)` - // Minimum execution time: 40_922_000 picoseconds. - Weight::from_parts(39_098_903, 6676) - // Standard Error: 714 - .saturating_add(Weight::from_parts(44_125, 0).saturating_mul(m.into())) - // Standard Error: 696 - .saturating_add(Weight::from_parts(111_263, 0).saturating_mul(p.into())) + // Measured: `576 + m * (96 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` + // Minimum execution time: 45_173_000 picoseconds. + Weight::from_parts(42_192_020, 6676) + // Standard Error: 1_456 + .saturating_add(Weight::from_parts(66_751, 0).saturating_mul(m.into())) + // Standard Error: 1_420 + .saturating_add(Weight::from_parts(158_161, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 96).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -578,19 +582,19 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. 
fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `903 + m * (96 ±0) + p * (41 ±0)` - // Estimated: `6676 + m * (98 ±0) + p * (40 ±0)` - // Minimum execution time: 51_890_000 picoseconds. - Weight::from_parts(49_880_817, 6676) - // Standard Error: 81 - .saturating_add(Weight::from_parts(688, 0).saturating_mul(b.into())) - // Standard Error: 862 - .saturating_add(Weight::from_parts(54_419, 0).saturating_mul(m.into())) - // Standard Error: 840 - .saturating_add(Weight::from_parts(122_253, 0).saturating_mul(p.into())) + // Measured: `1087 + m * (96 ±0) + p * (39 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (40 ±0)` + // Minimum execution time: 58_290_000 picoseconds. + Weight::from_parts(54_924_919, 6676) + // Standard Error: 157 + .saturating_add(Weight::from_parts(464, 0).saturating_mul(b.into())) + // Standard Error: 1_665 + .saturating_add(Weight::from_parts(73_183, 0).saturating_mul(m.into())) + // Standard Error: 1_623 + .saturating_add(Weight::from_parts(168_318, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 98).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) @@ -609,18 +613,18 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `622 + m * (96 ±0) + p * (37 ±0)` - // Estimated: `6676 + m * (96 ±0) + p * (37 ±0)` - // Minimum execution time: 42_391_000 picoseconds. 
- Weight::from_parts(40_156_254, 6676) - // Standard Error: 728 - .saturating_add(Weight::from_parts(47_889, 0).saturating_mul(m.into())) - // Standard Error: 719 - .saturating_add(Weight::from_parts(112_596, 0).saturating_mul(p.into())) + // Measured: `577 + m * (96 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` + // Minimum execution time: 46_794_000 picoseconds. + Weight::from_parts(43_092_958, 6676) + // Standard Error: 1_273 + .saturating_add(Weight::from_parts(71_054, 0).saturating_mul(m.into())) + // Standard Error: 1_257 + .saturating_add(Weight::from_parts(152_820, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 96).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } /// Storage: Alliance Members (r:1 w:0) /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) @@ -639,16 +643,16 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `591 + m * (96 ±0) + p * (36 ±0)` + // Measured: `684 + m * (96 ±0) + p * (35 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 42_320_000 picoseconds. - Weight::from_parts(40_205_526, 6676) - // Standard Error: 64 - .saturating_add(Weight::from_parts(49, 0).saturating_mul(b.into())) - // Standard Error: 690 - .saturating_add(Weight::from_parts(46_508, 0).saturating_mul(m.into())) - // Standard Error: 665 - .saturating_add(Weight::from_parts(112_222, 0).saturating_mul(p.into())) + // Minimum execution time: 47_338_000 picoseconds. 
+ Weight::from_parts(41_257_479, 6676) + // Standard Error: 119 + .saturating_add(Weight::from_parts(1_019, 0).saturating_mul(b.into())) + // Standard Error: 1_277 + .saturating_add(Weight::from_parts(78_453, 0).saturating_mul(m.into())) + // Standard Error: 1_231 + .saturating_add(Weight::from_parts(150_991, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -664,12 +668,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `217` // Estimated: `12362` - // Minimum execution time: 32_509_000 picoseconds. - Weight::from_parts(23_584_337, 12362) - // Standard Error: 377 - .saturating_add(Weight::from_parts(114_917, 0).saturating_mul(m.into())) - // Standard Error: 373 - .saturating_add(Weight::from_parts(97_593, 0).saturating_mul(z.into())) + // Minimum execution time: 35_012_000 picoseconds. + Weight::from_parts(24_288_079, 12362) + // Standard Error: 878 + .saturating_add(Weight::from_parts(153_615, 0).saturating_mul(m.into())) + // Standard Error: 867 + .saturating_add(Weight::from_parts(129_307, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -692,14 +696,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (251 ±0)` // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` - // Minimum execution time: 275_061_000 picoseconds. - Weight::from_parts(565_248, 12362) - // Standard Error: 15_948 - .saturating_add(Weight::from_parts(1_636_348, 0).saturating_mul(x.into())) - // Standard Error: 15_761 - .saturating_add(Weight::from_parts(1_580_146, 0).saturating_mul(y.into())) - // Standard Error: 31_496 - .saturating_add(Weight::from_parts(17_217_382, 0).saturating_mul(z.into())) + // Minimum execution time: 309_235_000 picoseconds. 
+ Weight::from_parts(311_279_000, 12362) + // Standard Error: 26_510 + .saturating_add(Weight::from_parts(543_475, 0).saturating_mul(x.into())) + // Standard Error: 26_382 + .saturating_add(Weight::from_parts(603_169, 0).saturating_mul(y.into())) + // Standard Error: 52_716 + .saturating_add(Weight::from_parts(16_264_836, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(y.into()))) @@ -716,8 +720,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_261_000 picoseconds. - Weight::from_parts(10_389_000, 0) + // Minimum execution time: 8_833_000 picoseconds. + Weight::from_parts(9_313_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Alliance Announcements (r:1 w:1) @@ -726,8 +730,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `246` // Estimated: `10187` - // Minimum execution time: 13_483_000 picoseconds. - Weight::from_parts(13_805_000, 10187) + // Minimum execution time: 12_231_000 picoseconds. + Weight::from_parts(12_761_000, 10187) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -737,8 +741,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `319` // Estimated: `10187` - // Minimum execution time: 14_816_000 picoseconds. - Weight::from_parts(15_163_000, 10187) + // Minimum execution time: 13_079_000 picoseconds. + Weight::from_parts(13_612_000, 10187) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -754,8 +758,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `468` // Estimated: `18048` - // Minimum execution time: 46_149_000 picoseconds. 
- Weight::from_parts(46_827_000, 18048) + // Minimum execution time: 44_574_000 picoseconds. + Weight::from_parts(46_157_000, 18048) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -767,8 +771,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `367` // Estimated: `18048` - // Minimum execution time: 28_463_000 picoseconds. - Weight::from_parts(28_730_000, 18048) + // Minimum execution time: 26_114_000 picoseconds. + Weight::from_parts(27_069_000, 18048) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -784,8 +788,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `443` // Estimated: `12362` - // Minimum execution time: 28_401_000 picoseconds. - Weight::from_parts(28_717_000, 12362) + // Minimum execution time: 25_882_000 picoseconds. + Weight::from_parts(26_923_000, 12362) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -803,8 +807,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `443` // Estimated: `23734` - // Minimum execution time: 36_538_000 picoseconds. - Weight::from_parts(37_197_000, 23734) + // Minimum execution time: 34_112_000 picoseconds. + Weight::from_parts(35_499_000, 23734) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -820,8 +824,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `687` // Estimated: `6676` - // Minimum execution time: 42_324_000 picoseconds. - Weight::from_parts(42_890_000, 6676) + // Minimum execution time: 41_239_000 picoseconds. 
+ Weight::from_parts(42_764_000, 6676) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -841,8 +845,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `707` // Estimated: `18048` - // Minimum execution time: 68_003_000 picoseconds. - Weight::from_parts(68_657_000, 18048) + // Minimum execution time: 68_071_000 picoseconds. + Weight::from_parts(71_808_000, 18048) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -856,12 +860,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `246` // Estimated: `27187` - // Minimum execution time: 8_304_000 picoseconds. - Weight::from_parts(8_424_000, 27187) - // Standard Error: 2_765 - .saturating_add(Weight::from_parts(1_529_793, 0).saturating_mul(n.into())) - // Standard Error: 1_082 - .saturating_add(Weight::from_parts(71_352, 0).saturating_mul(l.into())) + // Minimum execution time: 7_006_000 picoseconds. + Weight::from_parts(7_253_000, 27187) + // Standard Error: 3_403 + .saturating_add(Weight::from_parts(1_680_082, 0).saturating_mul(n.into())) + // Standard Error: 1_333 + .saturating_add(Weight::from_parts(72_943, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -875,12 +879,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + l * (100 ±0) + n * (289 ±0)` // Estimated: `27187` - // Minimum execution time: 8_348_000 picoseconds. - Weight::from_parts(8_505_000, 27187) - // Standard Error: 187_398 - .saturating_add(Weight::from_parts(16_545_597, 0).saturating_mul(n.into())) - // Standard Error: 73_393 - .saturating_add(Weight::from_parts(350_415, 0).saturating_mul(l.into())) + // Minimum execution time: 7_292_000 picoseconds. 
+ Weight::from_parts(7_629_000, 27187) + // Standard Error: 176_225 + .saturating_add(Weight::from_parts(16_646_429, 0).saturating_mul(n.into())) + // Standard Error: 69_017 + .saturating_add(Weight::from_parts(310_978, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -896,8 +900,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `443` // Estimated: `18048` - // Minimum execution time: 34_461_000 picoseconds. - Weight::from_parts(34_992_000, 18048) + // Minimum execution time: 31_798_000 picoseconds. + Weight::from_parts(33_463_000, 18048) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } diff --git a/frame/asset-conversion/Cargo.toml b/frame/asset-conversion/Cargo.toml new file mode 100644 index 0000000000000..77bfab48bbf08 --- /dev/null +++ b/frame/asset-conversion/Cargo.toml @@ -0,0 +1,64 @@ +[package] +name = "pallet-asset-conversion" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME asset conversion pallet" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } +sp-core = { version = "21.0.0", default-features = false, path = 
"../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../../primitives/arithmetic" } + +[dev-dependencies] +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +pallet-assets = { version = "4.0.0-dev", path = "../assets" } +primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info", "num-traits"] } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-std/std", + "sp-runtime/std", + "sp-arithmetic/std", + "pallet-assets/std", + "pallet-balances/std", + "sp-api/std", + "sp-core/std", + "sp-io/std" +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-assets/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/asset-conversion/README.md b/frame/asset-conversion/README.md new file mode 100644 index 0000000000000..e895db5e83adf --- /dev/null +++ b/frame/asset-conversion/README.md @@ -0,0 +1,25 @@ +# asset-conversion + +## A swap pallet + +This pallet allows assets to be converted from one type to another by means of a constant product formula. +The pallet is based on [Uniswap V2](https://github.com/Uniswap/v2-core) logic. 
+ +### Overview + +This pallet allows you to: + + - create a liquidity pool for 2 assets + - provide the liquidity and receive back an LP token + - exchange the LP token back to assets + - swap 2 assets if there is a pool created + - query for an exchange price via a new runtime call endpoint + - query the size of a liquidity pool. + +Please see the rust module documentation for full details: + +`cargo doc -p pallet-asset-conversion --open` + +### License + +License: Apache-2.0 diff --git a/frame/asset-conversion/src/benchmarking.rs b/frame/asset-conversion/src/benchmarking.rs new file mode 100644 index 0000000000000..87b541cd4744d --- /dev/null +++ b/frame/asset-conversion/src/benchmarking.rs @@ -0,0 +1,338 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Asset Conversion pallet benchmarking. 
+ +use super::*; +use frame_benchmarking::{benchmarks, whitelisted_caller}; +use frame_support::{ + assert_ok, + storage::bounded_vec::BoundedVec, + traits::{ + fungible::{Inspect as InspectFungible, Mutate as MutateFungible, Unbalanced}, + fungibles::{Create, Inspect, Mutate}, + }, +}; +use frame_system::RawOrigin as SystemOrigin; +use sp_core::Get; +use sp_runtime::traits::{Bounded, StaticLookup}; +use sp_std::{ops::Div, prelude::*}; + +use crate::Pallet as AssetConversion; + +const INITIAL_ASSET_BALANCE: u128 = 1_000_000_000_000; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +type BalanceOf = + <::Currency as InspectFungible<::AccountId>>::Balance; + +fn get_lp_token_id() -> T::PoolAssetId +where + T::PoolAssetId: Into, +{ + let next_id: u32 = AssetConversion::::get_next_pool_asset_id().into(); + (next_id - 1).into() +} + +fn create_asset(asset: &T::MultiAssetId) -> (T::AccountId, AccountIdLookupOf) +where + T::AssetBalance: From, + T::Currency: Unbalanced, + T::Assets: Create + Mutate, +{ + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + if let MultiAssetIdConversionResult::Converted(asset_id) = + T::MultiAssetIdConverter::try_convert(asset) + { + T::Currency::set_balance(&caller, BalanceOf::::max_value().div(1000u32.into())); + assert_ok!(T::Assets::create(asset_id.clone(), caller.clone(), true, 1.into())); + assert_ok!(T::Assets::mint_into(asset_id, &caller, INITIAL_ASSET_BALANCE.into())); + } + (caller, caller_lookup) +} + +fn create_asset_and_pool( + asset1: &T::MultiAssetId, + asset2: &T::MultiAssetId, +) -> (T::PoolAssetId, T::AccountId, AccountIdLookupOf) +where + T::AssetBalance: From, + T::Currency: Unbalanced, + T::Assets: Create + Mutate, + T::PoolAssetId: Into, +{ + let (_, _) = create_asset::(asset1); + let (caller, caller_lookup) = create_asset::(asset2); + + assert_ok!(AssetConversion::::create_pool( + SystemOrigin::Signed(caller.clone()).into(), + asset1.clone(), + 
asset2.clone() + )); + let lp_token = get_lp_token_id::(); + + (lp_token, caller, caller_lookup) +} + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +benchmarks! { + where_clause { + where + T::AssetBalance: From + Into, + T::Currency: Unbalanced, + T::Balance: From + Into, + T::Assets: Create + Mutate, + T::PoolAssetId: Into, + } + + create_pool { + let asset1 = T::MultiAssetIdConverter::get_native(); + let asset2 = T::BenchmarkHelper::multiasset_id(0); + let (caller, _) = create_asset::(&asset2); + }: _(SystemOrigin::Signed(caller.clone()), asset1.clone(), asset2.clone()) + verify { + let lp_token = get_lp_token_id::(); + let pool_id = (asset1.clone(), asset2.clone()); + assert_last_event::(Event::PoolCreated { + creator: caller.clone(), + pool_account: AssetConversion::::get_pool_account(&pool_id), + pool_id, + lp_token, + }.into()); + } + + add_liquidity { + let asset1 = T::MultiAssetIdConverter::get_native(); + let asset2 = T::BenchmarkHelper::multiasset_id(0); + let (lp_token, caller, _) = create_asset_and_pool::(&asset1, &asset2); + let ed: u128 = T::Currency::minimum_balance().into(); + let add_amount = 1000 + ed; + }: _(SystemOrigin::Signed(caller.clone()), asset1.clone(), asset2.clone(), add_amount.into(), 1000.into(), 0.into(), 0.into(), caller.clone()) + verify { + let pool_id = (asset1.clone(), asset2.clone()); + let lp_minted = AssetConversion::::calc_lp_amount_for_zero_supply(&add_amount.into(), &1000.into()).unwrap().into(); + assert_eq!( + T::PoolAssets::balance(lp_token, &caller), + lp_minted.into() + ); + assert_eq!( + T::Currency::balance(&AssetConversion::::get_pool_account(&pool_id)), + add_amount.into() + ); + assert_eq!( + T::Assets::balance(T::BenchmarkHelper::asset_id(0), 
&AssetConversion::::get_pool_account(&pool_id)), + 1000.into() + ); + } + + remove_liquidity { + let asset1 = T::MultiAssetIdConverter::get_native(); + let asset2 = T::BenchmarkHelper::multiasset_id(0); + let (lp_token, caller, _) = create_asset_and_pool::(&asset1, &asset2); + let ed: u128 = T::Currency::minimum_balance().into(); + let add_amount = 100 * ed; + let lp_minted = AssetConversion::::calc_lp_amount_for_zero_supply(&add_amount.into(), &1000.into()).unwrap().into(); + let remove_lp_amount = lp_minted.checked_div(10).unwrap(); + + AssetConversion::::add_liquidity( + SystemOrigin::Signed(caller.clone()).into(), + asset1.clone(), + asset2.clone(), + add_amount.into(), + 1000.into(), + 0.into(), + 0.into(), + caller.clone(), + )?; + let total_supply = >::total_issuance(lp_token.clone()); + }: _(SystemOrigin::Signed(caller.clone()), asset1, asset2, remove_lp_amount.into(), 0.into(), 0.into(), caller.clone()) + verify { + let new_total_supply = >::total_issuance(lp_token.clone()); + assert_eq!( + new_total_supply, + total_supply - remove_lp_amount.into() + ); + } + + swap_exact_tokens_for_tokens { + let native = T::MultiAssetIdConverter::get_native(); + let asset1 = T::BenchmarkHelper::multiasset_id(1); + let asset2 = T::BenchmarkHelper::multiasset_id(2); + let (_, caller, _) = create_asset_and_pool::(&native, &asset1); + let (_, _) = create_asset::(&asset2); + let ed: u128 = T::Currency::minimum_balance().into(); + + AssetConversion::::add_liquidity( + SystemOrigin::Signed(caller.clone()).into(), + native.clone(), + asset1.clone(), + (100 * ed).into(), + 200.into(), + 0.into(), + 0.into(), + caller.clone(), + )?; + + let path; + let swap_amount; + // if we only allow the native-asset pools, then the worst case scenario would be to swap + // asset1-native-asset2 + if !T::AllowMultiAssetPools::get() { + AssetConversion::::create_pool(SystemOrigin::Signed(caller.clone()).into(), native.clone(), asset2.clone())?; + AssetConversion::::add_liquidity( + 
SystemOrigin::Signed(caller.clone()).into(), + native.clone(), + asset2.clone(), + (500 * ed).into(), + 1000.into(), + 0.into(), + 0.into(), + caller.clone(), + )?; + path = vec![asset1.clone(), native.clone(), asset2.clone()]; + swap_amount = 100.into(); + } else { + let asset3 = T::BenchmarkHelper::multiasset_id(3); + AssetConversion::::create_pool(SystemOrigin::Signed(caller.clone()).into(), asset1.clone(), asset2.clone())?; + let (_, _) = create_asset::(&asset3); + AssetConversion::::create_pool(SystemOrigin::Signed(caller.clone()).into(), asset2.clone(), asset3.clone())?; + + AssetConversion::::add_liquidity( + SystemOrigin::Signed(caller.clone()).into(), + asset1.clone(), + asset2.clone(), + 200.into(), + 2000.into(), + 0.into(), + 0.into(), + caller.clone(), + )?; + AssetConversion::::add_liquidity( + SystemOrigin::Signed(caller.clone()).into(), + asset2.clone(), + asset3.clone(), + 2000.into(), + 2000.into(), + 0.into(), + 0.into(), + caller.clone(), + )?; + path = vec![native.clone(), asset1.clone(), asset2.clone(), asset3.clone()]; + swap_amount = ed.into(); + } + + let path: BoundedVec<_, T::MaxSwapPathLength> = BoundedVec::try_from(path).unwrap(); + let native_balance = T::Currency::balance(&caller); + let asset1_balance = T::Assets::balance(T::BenchmarkHelper::asset_id(1), &caller); + }: _(SystemOrigin::Signed(caller.clone()), path, swap_amount, 1.into(), caller.clone(), false) + verify { + if !T::AllowMultiAssetPools::get() { + let new_asset1_balance = T::Assets::balance(T::BenchmarkHelper::asset_id(1), &caller); + assert_eq!(new_asset1_balance, asset1_balance - 100.into()); + } else { + let new_native_balance = T::Currency::balance(&caller); + assert_eq!(new_native_balance, native_balance - ed.into()); + } + } + + swap_tokens_for_exact_tokens { + let native = T::MultiAssetIdConverter::get_native(); + let asset1 = T::BenchmarkHelper::multiasset_id(1); + let asset2 = T::BenchmarkHelper::multiasset_id(2); + let (_, caller, _) = 
create_asset_and_pool::(&native, &asset1); + let (_, _) = create_asset::(&asset2); + let ed: u128 = T::Currency::minimum_balance().into(); + + AssetConversion::::add_liquidity( + SystemOrigin::Signed(caller.clone()).into(), + native.clone(), + asset1.clone(), + (1000 * ed).into(), + 500.into(), + 0.into(), + 0.into(), + caller.clone(), + )?; + + let path; + // if we only allow the native-asset pools, then the worst case scenario would be to swap + // asset1-native-asset2 + if !T::AllowMultiAssetPools::get() { + AssetConversion::::create_pool(SystemOrigin::Signed(caller.clone()).into(), native.clone(), asset2.clone())?; + AssetConversion::::add_liquidity( + SystemOrigin::Signed(caller.clone()).into(), + native.clone(), + asset2.clone(), + (500 * ed).into(), + 1000.into(), + 0.into(), + 0.into(), + caller.clone(), + )?; + path = vec![asset1.clone(), native.clone(), asset2.clone()]; + } else { + AssetConversion::::create_pool(SystemOrigin::Signed(caller.clone()).into(), asset1.clone(), asset2.clone())?; + let asset3 = T::BenchmarkHelper::multiasset_id(3); + let (_, _) = create_asset::(&asset3); + AssetConversion::::create_pool(SystemOrigin::Signed(caller.clone()).into(), asset2.clone(), asset3.clone())?; + + AssetConversion::::add_liquidity( + SystemOrigin::Signed(caller.clone()).into(), + asset1.clone(), + asset2.clone(), + 2000.into(), + 2000.into(), + 0.into(), + 0.into(), + caller.clone(), + )?; + AssetConversion::::add_liquidity( + SystemOrigin::Signed(caller.clone()).into(), + asset2.clone(), + asset3.clone(), + 2000.into(), + 2000.into(), + 0.into(), + 0.into(), + caller.clone(), + )?; + path = vec![native.clone(), asset1.clone(), asset2.clone(), asset3.clone()]; + } + + let path: BoundedVec<_, T::MaxSwapPathLength> = BoundedVec::try_from(path).unwrap(); + let asset2_balance = T::Assets::balance(T::BenchmarkHelper::asset_id(2), &caller); + let asset3_balance = T::Assets::balance(T::BenchmarkHelper::asset_id(3), &caller); + }: 
_(SystemOrigin::Signed(caller.clone()), path.clone(), 100.into(), (1000 * ed).into(), caller.clone(), false) + verify { + if !T::AllowMultiAssetPools::get() { + let new_asset2_balance = T::Assets::balance(T::BenchmarkHelper::asset_id(2), &caller); + assert_eq!(new_asset2_balance, asset2_balance + 100.into()); + } else { + let new_asset3_balance = T::Assets::balance(T::BenchmarkHelper::asset_id(3), &caller); + assert_eq!(new_asset3_balance, asset3_balance + 100.into()); + } + } + + impl_benchmark_test_suite!(AssetConversion, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/asset-conversion/src/lib.rs b/frame/asset-conversion/src/lib.rs new file mode 100644 index 0000000000000..3a16ab875c06a --- /dev/null +++ b/frame/asset-conversion/src/lib.rs @@ -0,0 +1,1305 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Substrate Asset Conversion pallet +//! +//! Substrate Asset Conversion pallet based on the [Uniswap V2](https://github.com/Uniswap/v2-core) logic. +//! +//! ## Overview +//! +//! This pallet allows you to: +//! +//! - [create a liquidity pool](`Pallet::create_pool()`) for 2 assets +//! - [provide the liquidity](`Pallet::add_liquidity()`) and receive back an LP token +//! - [exchange the LP token back to assets](`Pallet::remove_liquidity()`) +//! 
- [swap a specific amount of assets for another](`Pallet::swap_exact_tokens_for_tokens()`) if +//! there is a pool created, or +//! - [swap some assets for a specific amount of +//! another](`Pallet::swap_tokens_for_exact_tokens()`). +//! - [query for an exchange price](`AssetConversionApi::quote_price_exact_tokens_for_tokens`) via +//! a runtime call endpoint +//! - [query the size of a liquidity pool](`AssetConversionApi::get_reserves`) via a runtime api +//! endpoint. +//! +//! The `quote_price_exact_tokens_for_tokens` and `quote_price_tokens_for_exact_tokens` functions +//! both take a path parameter of the route to take. If you want to swap from native asset to +//! non-native asset 1, you would pass in a path of `[DOT, 1]` or `[1, DOT]`. If you want to swap +//! from non-native asset 1 to non-native asset 2, you would pass in a path of `[1, DOT, 2]`. +//! +//! (For an example of configuring this pallet to use `MultiLocation` as an asset id, see the +//! cumulus repo). +//! +//! Here is an example `state_call` that asks for a quote of a pool of native versus asset 1: +//! +//! ```text +//! curl -sS -H "Content-Type: application/json" -d \ +//! '{"id":1, "jsonrpc":"2.0", "method": "state_call", "params": ["AssetConversionApi_quote_price_tokens_for_exact_tokens", "0x0101000000000000000000000011000000000000000000"]}' \ +//! http://localhost:9933/ +//! ``` +//! (This can be run against the kitchen sync node in the `node` folder of this repo.) 
+#![deny(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] +use frame_support::traits::{DefensiveOption, Incrementable}; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +mod types; +pub mod weights; + +#[cfg(test)] +mod tests; + +#[cfg(test)] +mod mock; + +use codec::Codec; +use frame_support::{ + ensure, + traits::tokens::{AssetId, Balance}, +}; +use frame_system::{ + ensure_signed, + pallet_prelude::{BlockNumberFor, OriginFor}, +}; +pub use pallet::*; +use sp_arithmetic::traits::Unsigned; +use sp_runtime::{ + traits::{ + CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Ensure, MaybeDisplay, TrailingZeroInput, + }, + DispatchError, +}; +use sp_std::prelude::*; +pub use types::*; +pub use weights::WeightInfo; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{ + pallet_prelude::*, + traits::{ + fungible::{Inspect as InspectFungible, Mutate as MutateFungible}, + fungibles::{Create, Inspect, Mutate}, + tokens::{ + Fortitude::Polite, + Precision::Exact, + Preservation::{Expendable, Preserve}, + }, + AccountTouch, ContainsPair, + }, + BoundedBTreeSet, PalletId, + }; + use sp_arithmetic::Permill; + use sp_runtime::{ + traits::{IntegerSquareRoot, One, Zero}, + Saturating, + }; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// Overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// Currency type that this works on. + type Currency: InspectFungible + + MutateFungible; + + /// The `Currency::Balance` type of the native currency. + type Balance: Balance; + + /// The type used to describe the amount of fractions converted into assets. + type AssetBalance: Balance; + + /// A type used for conversions between `Balance` and `AssetBalance`. + type HigherPrecisionBalance: IntegerSquareRoot + + One + + Ensure + + Unsigned + + From + + From + + From + + TryInto + + TryInto; + + /// Identifier for the class of non-native asset. 
+ /// Note: A `From` bound here would prevent `MultiLocation` from being used as an + /// `AssetId`. + type AssetId: AssetId; + + /// Type that identifies either the native currency or a token class from `Assets`. + /// `Ord` is added because of `get_pool_id`. + type MultiAssetId: AssetId + Ord + From; + + /// Type to convert an `AssetId` into `MultiAssetId`. + type MultiAssetIdConverter: MultiAssetIdConverter; + + /// `AssetId` to address the lp tokens by. + type PoolAssetId: AssetId + PartialOrd + Incrementable + From; + + /// Registry for the assets. + type Assets: Inspect + + Mutate + + AccountTouch + + ContainsPair; + + /// Registry for the lp tokens. Ideally only this pallet should have create permissions on + /// the assets. + type PoolAssets: Inspect + + Create + + Mutate + + AccountTouch; + + /// A % the liquidity providers will take of every swap. Represents 10ths of a percent. + #[pallet::constant] + type LPFee: Get; + + /// A one-time fee to setup the pool. + #[pallet::constant] + type PoolSetupFee: Get; + + /// An account that receives the pool setup fee. + type PoolSetupFeeReceiver: Get; + + /// A fee to withdraw the liquidity. + #[pallet::constant] + type LiquidityWithdrawalFee: Get; + + /// The minimum LP token amount that could be minted. Ameliorates rounding errors. + #[pallet::constant] + type MintMinLiquidity: Get; + + /// The max number of hops in a swap. + #[pallet::constant] + type MaxSwapPathLength: Get; + + /// The pallet's id, used for deriving its sovereign account ID. + #[pallet::constant] + type PalletId: Get; + + /// A setting to allow creating pools with both non-native assets. + #[pallet::constant] + type AllowMultiAssetPools: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + + /// The benchmarks need a way to create asset ids from u32s. + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper: BenchmarkHelper; + } + + /// Map from `PoolAssetId` to `PoolInfo`. 
This establishes whether a pool has been officially + /// created rather than people sending tokens directly to a pool's public account. + #[pallet::storage] + pub type Pools = + StorageMap<_, Blake2_128Concat, PoolIdOf, PoolInfo, OptionQuery>; + + /// Stores the `PoolAssetId` that is going to be used for the next lp token. + /// This gets incremented whenever a new lp pool is created. + #[pallet::storage] + pub type NextPoolAssetId = StorageValue<_, T::PoolAssetId, OptionQuery>; + + // Pallet's events. + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A successful call of the `CreatePool` extrinsic will create this event. + PoolCreated { + /// The account that created the pool. + creator: T::AccountId, + /// The pool id associated with the pool. Note that the order of the assets may not be + /// the same as the order specified in the create pool extrinsic. + pool_id: PoolIdOf, + /// The account ID of the pool. + pool_account: T::AccountId, + /// The id of the liquidity tokens that will be minted when assets are added to this + /// pool. + lp_token: T::PoolAssetId, + }, + + /// A successful call of the `AddLiquidity` extrinsic will create this event. + LiquidityAdded { + /// The account that the liquidity was taken from. + who: T::AccountId, + /// The account that the liquidity tokens were minted to. + mint_to: T::AccountId, + /// The pool id of the pool that the liquidity was added to. + pool_id: PoolIdOf, + /// The amount of the first asset that was added to the pool. + amount1_provided: T::AssetBalance, + /// The amount of the second asset that was added to the pool. + amount2_provided: T::AssetBalance, + /// The id of the lp token that was minted. + lp_token: T::PoolAssetId, + /// The amount of lp tokens that were minted of that id. + lp_token_minted: T::AssetBalance, + }, + + /// A successful call of the `RemoveLiquidity` extrinsic will create this event. 
+ LiquidityRemoved { + /// The account that the liquidity tokens were burned from. + who: T::AccountId, + /// The account that the assets were transferred to. + withdraw_to: T::AccountId, + /// The pool id that the liquidity was removed from. + pool_id: PoolIdOf, + /// The amount of the first asset that was removed from the pool. + amount1: T::AssetBalance, + /// The amount of the second asset that was removed from the pool. + amount2: T::AssetBalance, + /// The id of the lp token that was burned. + lp_token: T::PoolAssetId, + /// The amount of lp tokens that were burned of that id. + lp_token_burned: T::AssetBalance, + /// Liquidity withdrawal fee (%). + withdrawal_fee: Permill, + }, + /// Assets have been converted from one to another. Both `SwapExactTokenForToken` + /// and `SwapTokenForExactToken` will generate this event. + SwapExecuted { + /// Which account was the instigator of the swap. + who: T::AccountId, + /// The account that the assets were transferred to. + send_to: T::AccountId, + /// The route of asset ids that the swap went through. + /// E.g. A -> Dot -> B + path: BoundedVec, + /// The amount of the first asset that was swapped. + amount_in: T::AssetBalance, + /// The amount of the second asset that was received. + amount_out: T::AssetBalance, + }, + /// An amount has been transferred from one account to another. + Transfer { + /// The account that the assets were transferred from. + from: T::AccountId, + /// The account that the assets were transferred to. + to: T::AccountId, + /// The asset that was transferred. + asset: T::MultiAssetId, + /// The amount of the asset that was transferred. + amount: T::AssetBalance, + }, + } + + #[pallet::error] + pub enum Error { + /// Provided assets are equal. + EqualAssets, + /// Provided asset is not supported for pool. + UnsupportedAsset, + /// Pool already exists. + PoolExists, + /// Desired amount can't be zero. 
+ WrongDesiredAmount, + /// Provided amount should be greater than or equal to the existential deposit/asset's + /// minimal amount. + AmountOneLessThanMinimal, + /// Provided amount should be greater than or equal to the existential deposit/asset's + /// minimal amount. + AmountTwoLessThanMinimal, + /// Reserve needs to always be greater than or equal to the existential deposit/asset's + /// minimal amount. + ReserveLeftLessThanMinimal, + /// Desired amount can't be equal to the pool reserve. + AmountOutTooHigh, + /// The pool doesn't exist. + PoolNotFound, + /// An overflow happened. + Overflow, + /// The minimal amount requirement for the first token in the pair wasn't met. + AssetOneDepositDidNotMeetMinimum, + /// The minimal amount requirement for the second token in the pair wasn't met. + AssetTwoDepositDidNotMeetMinimum, + /// The minimal amount requirement for the first token in the pair wasn't met. + AssetOneWithdrawalDidNotMeetMinimum, + /// The minimal amount requirement for the second token in the pair wasn't met. + AssetTwoWithdrawalDidNotMeetMinimum, + /// Optimal calculated amount is less than desired. + OptimalAmountLessThanDesired, + /// Insufficient liquidity minted. + InsufficientLiquidityMinted, + /// Requested liquidity can't be zero. + ZeroLiquidity, + /// Amount can't be zero. + ZeroAmount, + /// Insufficient liquidity in the pool. + InsufficientLiquidity, + /// Calculated amount out is less than provided minimum amount. + ProvidedMinimumNotSufficientForSwap, + /// Provided maximum amount is not sufficient for swap. + ProvidedMaximumNotSufficientForSwap, + /// Only pools with native on one side are valid. + PoolMustContainNativeCurrency, + /// The provided path must consist of 2 assets at least. + InvalidPath, + /// It was not possible to calculate path data. + PathError, + /// The provided path must consist of unique assets. + NonUniquePath, + /// It was not possible to get or increment the Id of the pool. 
+ IncorrectPoolAssetId, + /// Unable to find an element in an array/vec that should have one-to-one correspondence + /// with another. For example, an array of assets constituting a `path` should have a + /// corresponding array of `amounts` along the path. + CorrespondenceError, + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + assert!( + T::MaxSwapPathLength::get() > 1, + "the `MaxSwapPathLength` should be greater than 1", + ); + } + } + + /// Pallet's callable functions. + #[pallet::call] + impl Pallet { + /// Creates an empty liquidity pool and an associated new `lp_token` asset + /// (the id of which is returned in the `Event::PoolCreated` event). + /// + /// Once a pool is created, someone may [`Pallet::add_liquidity`] to it. + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::create_pool())] + pub fn create_pool( + origin: OriginFor, + asset1: T::MultiAssetId, + asset2: T::MultiAssetId, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + ensure!(asset1 != asset2, Error::::EqualAssets); + + // prepare pool_id + let pool_id = Self::get_pool_id(asset1, asset2); + ensure!(!Pools::::contains_key(&pool_id), Error::::PoolExists); + let (asset1, asset2) = &pool_id; + if !T::AllowMultiAssetPools::get() && !T::MultiAssetIdConverter::is_native(asset1) { + Err(Error::::PoolMustContainNativeCurrency)?; + } + + let pool_account = Self::get_pool_account(&pool_id); + frame_system::Pallet::::inc_providers(&pool_account); + + // pay the setup fee + T::Currency::transfer( + &sender, + &T::PoolSetupFeeReceiver::get(), + T::PoolSetupFee::get(), + Preserve, + )?; + + // try to convert both assets + match T::MultiAssetIdConverter::try_convert(asset1) { + MultiAssetIdConversionResult::Converted(asset) => + if !T::Assets::contains(&asset, &pool_account) { + T::Assets::touch(asset, pool_account.clone(), sender.clone())? 
+ }, + MultiAssetIdConversionResult::Unsupported(_) => Err(Error::::UnsupportedAsset)?, + MultiAssetIdConversionResult::Native => (), + } + match T::MultiAssetIdConverter::try_convert(asset2) { + MultiAssetIdConversionResult::Converted(asset) => + if !T::Assets::contains(&asset, &pool_account) { + T::Assets::touch(asset, pool_account.clone(), sender.clone())? + }, + MultiAssetIdConversionResult::Unsupported(_) => Err(Error::::UnsupportedAsset)?, + MultiAssetIdConversionResult::Native => (), + } + + let lp_token = NextPoolAssetId::::get() + .or(T::PoolAssetId::initial_value()) + .ok_or(Error::::IncorrectPoolAssetId)?; + let next_lp_token_id = lp_token.increment().ok_or(Error::::IncorrectPoolAssetId)?; + NextPoolAssetId::::set(Some(next_lp_token_id)); + + T::PoolAssets::create(lp_token.clone(), pool_account.clone(), false, 1u32.into())?; + T::PoolAssets::touch(lp_token.clone(), pool_account.clone(), sender.clone())?; + + let pool_info = PoolInfo { lp_token: lp_token.clone() }; + Pools::::insert(pool_id.clone(), pool_info); + + Self::deposit_event(Event::PoolCreated { + creator: sender, + pool_id, + pool_account, + lp_token, + }); + + Ok(()) + } + + /// Provide liquidity into the pool of `asset1` and `asset2`. + /// NOTE: an optimal amount of asset1 and asset2 will be calculated and + /// might be different than the provided `amount1_desired`/`amount2_desired` + /// thus you should provide the min amount you're happy to provide. + /// Params `amount1_min`/`amount2_min` represent that. + /// `mint_to` will be sent the liquidity tokens that represent this share of the pool. + /// + /// Once liquidity is added, someone may successfully call + /// [`Pallet::swap_exact_tokens_for_tokens`] successfully. 
+ #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::add_liquidity())] + pub fn add_liquidity( + origin: OriginFor, + asset1: T::MultiAssetId, + asset2: T::MultiAssetId, + amount1_desired: T::AssetBalance, + amount2_desired: T::AssetBalance, + amount1_min: T::AssetBalance, + amount2_min: T::AssetBalance, + mint_to: T::AccountId, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); + // swap params if needed + let (amount1_desired, amount2_desired, amount1_min, amount2_min) = + if pool_id.0 == asset1 { + (amount1_desired, amount2_desired, amount1_min, amount2_min) + } else { + (amount2_desired, amount1_desired, amount2_min, amount1_min) + }; + ensure!( + amount1_desired > Zero::zero() && amount2_desired > Zero::zero(), + Error::::WrongDesiredAmount + ); + + let maybe_pool = Pools::::get(&pool_id); + let pool = maybe_pool.as_ref().ok_or(Error::::PoolNotFound)?; + let pool_account = Self::get_pool_account(&pool_id); + + let (asset1, asset2) = &pool_id; + let reserve1 = Self::get_balance(&pool_account, asset1)?; + let reserve2 = Self::get_balance(&pool_account, asset2)?; + + let amount1: T::AssetBalance; + let amount2: T::AssetBalance; + if reserve1.is_zero() || reserve2.is_zero() { + amount1 = amount1_desired; + amount2 = amount2_desired; + } else { + let amount2_optimal = Self::quote(&amount1_desired, &reserve1, &reserve2)?; + + if amount2_optimal <= amount2_desired { + ensure!( + amount2_optimal >= amount2_min, + Error::::AssetTwoDepositDidNotMeetMinimum + ); + amount1 = amount1_desired; + amount2 = amount2_optimal; + } else { + let amount1_optimal = Self::quote(&amount2_desired, &reserve2, &reserve1)?; + ensure!( + amount1_optimal <= amount1_desired, + Error::::OptimalAmountLessThanDesired + ); + ensure!( + amount1_optimal >= amount1_min, + Error::::AssetOneDepositDidNotMeetMinimum + ); + amount1 = amount1_optimal; + amount2 = amount2_desired; + } + } + + 
Self::validate_minimal_amount(amount1.saturating_add(reserve1), asset1) + .map_err(|_| Error::::AmountOneLessThanMinimal)?; + Self::validate_minimal_amount(amount2.saturating_add(reserve2), asset2) + .map_err(|_| Error::::AmountTwoLessThanMinimal)?; + + Self::transfer(asset1, &sender, &pool_account, amount1, true)?; + Self::transfer(asset2, &sender, &pool_account, amount2, true)?; + + let total_supply = T::PoolAssets::total_issuance(pool.lp_token.clone()); + + let lp_token_amount: T::AssetBalance; + if total_supply.is_zero() { + lp_token_amount = Self::calc_lp_amount_for_zero_supply(&amount1, &amount2)?; + T::PoolAssets::mint_into( + pool.lp_token.clone(), + &pool_account, + T::MintMinLiquidity::get(), + )?; + } else { + let side1 = Self::mul_div(&amount1, &total_supply, &reserve1)?; + let side2 = Self::mul_div(&amount2, &total_supply, &reserve2)?; + lp_token_amount = side1.min(side2); + } + + ensure!( + lp_token_amount > T::MintMinLiquidity::get(), + Error::::InsufficientLiquidityMinted + ); + + T::PoolAssets::mint_into(pool.lp_token.clone(), &mint_to, lp_token_amount)?; + + Self::deposit_event(Event::LiquidityAdded { + who: sender, + mint_to, + pool_id, + amount1_provided: amount1, + amount2_provided: amount2, + lp_token: pool.lp_token.clone(), + lp_token_minted: lp_token_amount, + }); + + Ok(()) + } + + /// Allows you to remove liquidity by providing the `lp_token_burn` tokens that will be + /// burned in the process. With the usage of `amount1_min_receive`/`amount2_min_receive` + /// it's possible to control the min amount of returned tokens you're happy with. 
+ #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::remove_liquidity())] + pub fn remove_liquidity( + origin: OriginFor, + asset1: T::MultiAssetId, + asset2: T::MultiAssetId, + lp_token_burn: T::AssetBalance, + amount1_min_receive: T::AssetBalance, + amount2_min_receive: T::AssetBalance, + withdraw_to: T::AccountId, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); + // swap params if needed + let (amount1_min_receive, amount2_min_receive) = if pool_id.0 == asset1 { + (amount1_min_receive, amount2_min_receive) + } else { + (amount2_min_receive, amount1_min_receive) + }; + let (asset1, asset2) = pool_id.clone(); + + ensure!(lp_token_burn > Zero::zero(), Error::::ZeroLiquidity); + + let maybe_pool = Pools::::get(&pool_id); + let pool = maybe_pool.as_ref().ok_or(Error::::PoolNotFound)?; + + let pool_account = Self::get_pool_account(&pool_id); + let reserve1 = Self::get_balance(&pool_account, &asset1)?; + let reserve2 = Self::get_balance(&pool_account, &asset2)?; + + let total_supply = T::PoolAssets::total_issuance(pool.lp_token.clone()); + let withdrawal_fee_amount = T::LiquidityWithdrawalFee::get() * lp_token_burn; + let lp_redeem_amount = lp_token_burn.saturating_sub(withdrawal_fee_amount); + + let amount1 = Self::mul_div(&lp_redeem_amount, &reserve1, &total_supply)?; + let amount2 = Self::mul_div(&lp_redeem_amount, &reserve2, &total_supply)?; + + ensure!( + !amount1.is_zero() && amount1 >= amount1_min_receive, + Error::::AssetOneWithdrawalDidNotMeetMinimum + ); + ensure!( + !amount2.is_zero() && amount2 >= amount2_min_receive, + Error::::AssetTwoWithdrawalDidNotMeetMinimum + ); + let reserve1_left = reserve1.saturating_sub(amount1); + let reserve2_left = reserve2.saturating_sub(amount2); + Self::validate_minimal_amount(reserve1_left, &asset1) + .map_err(|_| Error::::ReserveLeftLessThanMinimal)?; + Self::validate_minimal_amount(reserve2_left, &asset2) + .map_err(|_| 
Error::::ReserveLeftLessThanMinimal)?; + + // burn the provided lp token amount that includes the fee + T::PoolAssets::burn_from(pool.lp_token.clone(), &sender, lp_token_burn, Exact, Polite)?; + + Self::transfer(&asset1, &pool_account, &withdraw_to, amount1, false)?; + Self::transfer(&asset2, &pool_account, &withdraw_to, amount2, false)?; + + Self::deposit_event(Event::LiquidityRemoved { + who: sender, + withdraw_to, + pool_id, + amount1, + amount2, + lp_token: pool.lp_token.clone(), + lp_token_burned: lp_token_burn, + withdrawal_fee: T::LiquidityWithdrawalFee::get(), + }); + + Ok(()) + } + + /// Swap the exact amount of `asset1` into `asset2`. + /// `amount_out_min` param allows you to specify the min amount of the `asset2` + /// you're happy to receive. + /// + /// [`AssetConversionApi::quote_price_exact_tokens_for_tokens`] runtime call can be called + /// for a quote. + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::swap_exact_tokens_for_tokens())] + pub fn swap_exact_tokens_for_tokens( + origin: OriginFor, + path: BoundedVec, + amount_in: T::AssetBalance, + amount_out_min: T::AssetBalance, + send_to: T::AccountId, + keep_alive: bool, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + Self::do_swap_exact_tokens_for_tokens( + sender, + path, + amount_in, + Some(amount_out_min), + send_to, + keep_alive, + )?; + Ok(()) + } + + /// Swap any amount of `asset1` to get the exact amount of `asset2`. + /// `amount_in_max` param allows to specify the max amount of the `asset1` + /// you're happy to provide. + /// + /// [`AssetConversionApi::quote_price_tokens_for_exact_tokens`] runtime call can be called + /// for a quote. 
+ #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::swap_tokens_for_exact_tokens())] + pub fn swap_tokens_for_exact_tokens( + origin: OriginFor, + path: BoundedVec, + amount_out: T::AssetBalance, + amount_in_max: T::AssetBalance, + send_to: T::AccountId, + keep_alive: bool, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + Self::do_swap_tokens_for_exact_tokens( + sender, + path, + amount_out, + Some(amount_in_max), + send_to, + keep_alive, + )?; + Ok(()) + } + } + + impl Pallet { + /// Swap exactly `amount_in` of asset `path[0]` for asset `path[1]`. + /// If an `amount_out_min` is specified, it will return an error if it is unable to acquire + /// the amount desired. + /// + /// Withdraws the `path[0]` asset from `sender`, deposits the `path[1]` asset to `send_to`, + /// respecting `keep_alive`. + /// + /// If successful, returns the amount of `path[1]` acquired for the `amount_in`. + pub fn do_swap_exact_tokens_for_tokens( + sender: T::AccountId, + path: BoundedVec, + amount_in: T::AssetBalance, + amount_out_min: Option, + send_to: T::AccountId, + keep_alive: bool, + ) -> Result { + ensure!(amount_in > Zero::zero(), Error::::ZeroAmount); + if let Some(amount_out_min) = amount_out_min { + ensure!(amount_out_min > Zero::zero(), Error::::ZeroAmount); + } + + Self::validate_swap_path(&path)?; + + let amounts = Self::get_amounts_out(&amount_in, &path)?; + let amount_out = + *amounts.last().defensive_ok_or("get_amounts_out() returned an empty result")?; + + if let Some(amount_out_min) = amount_out_min { + ensure!( + amount_out >= amount_out_min, + Error::::ProvidedMinimumNotSufficientForSwap + ); + } + + Self::do_swap(sender, &amounts, path, send_to, keep_alive)?; + Ok(amount_out) + } + + /// Take the `path[0]` asset and swap some amount for `amount_out` of the `path[1]`. If an + /// `amount_in_max` is specified, it will return an error if acquiring `amount_out` would be + /// too costly. 
+ /// + /// Withdraws `path[0]` asset from `sender`, deposits the `path[1]` asset to `send_to`, + /// respecting `keep_alive`. + /// + /// If successful returns the amount of the `path[0]` taken to provide `path[1]`. + pub fn do_swap_tokens_for_exact_tokens( + sender: T::AccountId, + path: BoundedVec, + amount_out: T::AssetBalance, + amount_in_max: Option, + send_to: T::AccountId, + keep_alive: bool, + ) -> Result { + ensure!(amount_out > Zero::zero(), Error::::ZeroAmount); + if let Some(amount_in_max) = amount_in_max { + ensure!(amount_in_max > Zero::zero(), Error::::ZeroAmount); + } + + Self::validate_swap_path(&path)?; + + let amounts = Self::get_amounts_in(&amount_out, &path)?; + let amount_in = + *amounts.first().defensive_ok_or("get_amounts_in() returned an empty result")?; + + if let Some(amount_in_max) = amount_in_max { + ensure!( + amount_in <= amount_in_max, + Error::::ProvidedMaximumNotSufficientForSwap + ); + } + + Self::do_swap(sender, &amounts, path, send_to, keep_alive)?; + Ok(amount_in) + } + + /// Transfer an `amount` of `asset_id`, respecting the `keep_alive` requirements. + fn transfer( + asset_id: &T::MultiAssetId, + from: &T::AccountId, + to: &T::AccountId, + amount: T::AssetBalance, + keep_alive: bool, + ) -> Result { + let result = match T::MultiAssetIdConverter::try_convert(asset_id) { + MultiAssetIdConversionResult::Converted(asset_id) => + T::Assets::transfer(asset_id, from, to, amount, Expendable), + MultiAssetIdConversionResult::Native => { + let preservation = match keep_alive { + true => Preserve, + false => Expendable, + }; + let amount = Self::convert_asset_balance_to_native_balance(amount)?; + Ok(Self::convert_native_balance_to_asset_balance(T::Currency::transfer( + from, + to, + amount, + preservation, + )?)?) 
+ }, + MultiAssetIdConversionResult::Unsupported(_) => + Err(Error::::UnsupportedAsset.into()), + }; + + if result.is_ok() { + Self::deposit_event(Event::Transfer { + from: from.clone(), + to: to.clone(), + asset: (*asset_id).clone(), + amount, + }); + } + result + } + + /// Convert a `Balance` type to an `AssetBalance`. + pub(crate) fn convert_native_balance_to_asset_balance( + amount: T::Balance, + ) -> Result> { + T::HigherPrecisionBalance::from(amount) + .try_into() + .map_err(|_| Error::::Overflow) + } + + /// Convert an `AssetBalance` type to a `Balance`. + pub(crate) fn convert_asset_balance_to_native_balance( + amount: T::AssetBalance, + ) -> Result> { + T::HigherPrecisionBalance::from(amount) + .try_into() + .map_err(|_| Error::::Overflow) + } + + /// Convert a `HigherPrecisionBalance` type to an `AssetBalance`. + pub(crate) fn convert_hpb_to_asset_balance( + amount: T::HigherPrecisionBalance, + ) -> Result> { + amount.try_into().map_err(|_| Error::::Overflow) + } + + /// Swap assets along a `path`, depositing in `send_to`. + pub(crate) fn do_swap( + sender: T::AccountId, + amounts: &Vec, + path: BoundedVec, + send_to: T::AccountId, + keep_alive: bool, + ) -> Result<(), DispatchError> { + ensure!(amounts.len() > 1, Error::::CorrespondenceError); + if let Some([asset1, asset2]) = &path.get(0..2) { + let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); + let pool_account = Self::get_pool_account(&pool_id); + // amounts should always contain a corresponding element to path. 
+ let first_amount = amounts.first().ok_or(Error::::CorrespondenceError)?; + + Self::transfer(asset1, &sender, &pool_account, *first_amount, keep_alive)?; + + let mut i = 0; + let path_len = path.len() as u32; + for assets_pair in path.windows(2) { + if let [asset1, asset2] = assets_pair { + let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); + let pool_account = Self::get_pool_account(&pool_id); + + let amount_out = + amounts.get((i + 1) as usize).ok_or(Error::::CorrespondenceError)?; + + let to = if i < path_len - 2 { + let asset3 = path.get((i + 2) as usize).ok_or(Error::::PathError)?; + Self::get_pool_account(&Self::get_pool_id( + asset2.clone(), + asset3.clone(), + )) + } else { + send_to.clone() + }; + + let reserve = Self::get_balance(&pool_account, asset2)?; + let reserve_left = reserve.saturating_sub(*amount_out); + Self::validate_minimal_amount(reserve_left, asset2) + .map_err(|_| Error::::ReserveLeftLessThanMinimal)?; + + Self::transfer(asset2, &pool_account, &to, *amount_out, true)?; + } + i.saturating_inc(); + } + Self::deposit_event(Event::SwapExecuted { + who: sender, + send_to, + path, + amount_in: *first_amount, + amount_out: *amounts.last().expect("Always has more than 1 element"), + }); + } else { + return Err(Error::::InvalidPath.into()) + } + Ok(()) + } + + /// The account ID of the pool. + /// + /// This actually does computation. If you need to keep using it, then make sure you cache + /// the value and only call this once. + pub fn get_pool_account(pool_id: &PoolIdOf) -> T::AccountId { + let encoded_pool_id = sp_io::hashing::blake2_256(&Encode::encode(pool_id)[..]); + + Decode::decode(&mut TrailingZeroInput::new(encoded_pool_id.as_ref())) + .expect("infinite length input; no invalid inputs for type; qed") + } + + /// Get the `owner`'s balance of `asset`, which could be the chain's native asset or another + /// fungible. Returns a value in the form of an `AssetBalance`. 
+ fn get_balance( + owner: &T::AccountId, + asset: &T::MultiAssetId, + ) -> Result> { + match T::MultiAssetIdConverter::try_convert(asset) { + MultiAssetIdConversionResult::Converted(asset_id) => Ok( + <::Assets>::reducible_balance(asset_id, owner, Expendable, Polite), + ), + MultiAssetIdConversionResult::Native => + Self::convert_native_balance_to_asset_balance( + <::Currency>::reducible_balance(owner, Expendable, Polite), + ), + MultiAssetIdConversionResult::Unsupported(_) => + Err(Error::::UnsupportedAsset.into()), + } + } + + /// Returns a pool id constructed from 2 assets. + /// 1. Native asset should be lower than the other asset ids. + /// 2. Two native or two non-native assets are compared by their `Ord` implementation. + /// + /// We expect deterministic order, so (asset1, asset2) or (asset2, asset1) returns the same + /// result. + pub fn get_pool_id(asset1: T::MultiAssetId, asset2: T::MultiAssetId) -> PoolIdOf { + match ( + T::MultiAssetIdConverter::is_native(&asset1), + T::MultiAssetIdConverter::is_native(&asset2), + ) { + (true, false) => return (asset1, asset2), + (false, true) => return (asset2, asset1), + _ => { + // else we want to be deterministic based on `Ord` implementation + if asset1 <= asset2 { + (asset1, asset2) + } else { + (asset2, asset1) + } + }, + } + } + + /// Returns the balance of each asset in the pool. + /// The tuple result is in the order requested (not necessarily the same as pool order). 
+ pub fn get_reserves( + asset1: &T::MultiAssetId, + asset2: &T::MultiAssetId, + ) -> Result<(T::AssetBalance, T::AssetBalance), Error> { + let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); + let pool_account = Self::get_pool_account(&pool_id); + + let balance1 = Self::get_balance(&pool_account, asset1)?; + let balance2 = Self::get_balance(&pool_account, asset2)?; + + if balance1.is_zero() || balance2.is_zero() { + Err(Error::::PoolNotFound)?; + } + + Ok((balance1, balance2)) + } + + /// Leading to an amount at the end of a `path`, get the required amounts in. + pub(crate) fn get_amounts_in( + amount_out: &T::AssetBalance, + path: &BoundedVec, + ) -> Result, DispatchError> { + let mut amounts: Vec = vec![*amount_out]; + + for assets_pair in path.windows(2).rev() { + if let [asset1, asset2] = assets_pair { + let (reserve_in, reserve_out) = Self::get_reserves(asset1, asset2)?; + let prev_amount = amounts.last().expect("Always has at least one element"); + let amount_in = Self::get_amount_in(prev_amount, &reserve_in, &reserve_out)?; + amounts.push(amount_in); + } + } + + amounts.reverse(); + Ok(amounts) + } + + /// Following an amount into a `path`, get the corresponding amounts out. + pub(crate) fn get_amounts_out( + amount_in: &T::AssetBalance, + path: &BoundedVec, + ) -> Result, DispatchError> { + let mut amounts: Vec = vec![*amount_in]; + + for assets_pair in path.windows(2) { + if let [asset1, asset2] = assets_pair { + let (reserve_in, reserve_out) = Self::get_reserves(asset1, asset2)?; + let prev_amount = amounts.last().expect("Always has at least one element"); + let amount_out = Self::get_amount_out(prev_amount, &reserve_in, &reserve_out)?; + amounts.push(amount_out); + } + } + + Ok(amounts) + } + + /// Used by the RPC service to provide current prices. 
+ pub fn quote_price_exact_tokens_for_tokens( + asset1: T::MultiAssetId, + asset2: T::MultiAssetId, + amount: T::AssetBalance, + include_fee: bool, + ) -> Option { + let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); + let pool_account = Self::get_pool_account(&pool_id); + + let balance1 = Self::get_balance(&pool_account, &asset1).ok()?; + let balance2 = Self::get_balance(&pool_account, &asset2).ok()?; + if !balance1.is_zero() { + if include_fee { + Self::get_amount_out(&amount, &balance1, &balance2).ok() + } else { + Self::quote(&amount, &balance1, &balance2).ok() + } + } else { + None + } + } + + /// Used by the RPC service to provide current prices. + pub fn quote_price_tokens_for_exact_tokens( + asset1: T::MultiAssetId, + asset2: T::MultiAssetId, + amount: T::AssetBalance, + include_fee: bool, + ) -> Option { + let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); + let pool_account = Self::get_pool_account(&pool_id); + + let balance1 = Self::get_balance(&pool_account, &asset1).ok()?; + let balance2 = Self::get_balance(&pool_account, &asset2).ok()?; + if !balance1.is_zero() { + if include_fee { + Self::get_amount_in(&amount, &balance1, &balance2).ok() + } else { + Self::quote(&amount, &balance2, &balance1).ok() + } + } else { + None + } + } + + /// Calculates the optimal amount from the reserves. + pub fn quote( + amount: &T::AssetBalance, + reserve1: &T::AssetBalance, + reserve2: &T::AssetBalance, + ) -> Result> { + // amount * reserve2 / reserve1 + Self::mul_div(amount, reserve2, reserve1) + } + + pub(super) fn calc_lp_amount_for_zero_supply( + amount1: &T::AssetBalance, + amount2: &T::AssetBalance, + ) -> Result> { + let amount1 = T::HigherPrecisionBalance::from(*amount1); + let amount2 = T::HigherPrecisionBalance::from(*amount2); + + let result = amount1 + .checked_mul(&amount2) + .ok_or(Error::::Overflow)? 
+ .integer_sqrt() + .checked_sub(&T::MintMinLiquidity::get().into()) + .ok_or(Error::::InsufficientLiquidityMinted)?; + + result.try_into().map_err(|_| Error::::Overflow) + } + + fn mul_div( + a: &T::AssetBalance, + b: &T::AssetBalance, + c: &T::AssetBalance, + ) -> Result> { + let a = T::HigherPrecisionBalance::from(*a); + let b = T::HigherPrecisionBalance::from(*b); + let c = T::HigherPrecisionBalance::from(*c); + + let result = a + .checked_mul(&b) + .ok_or(Error::::Overflow)? + .checked_div(&c) + .ok_or(Error::::Overflow)?; + + result.try_into().map_err(|_| Error::::Overflow) + } + + /// Calculates amount out. + /// + /// Given an input amount of an asset and pair reserves, returns the maximum output amount + /// of the other asset. + pub fn get_amount_out( + amount_in: &T::AssetBalance, + reserve_in: &T::AssetBalance, + reserve_out: &T::AssetBalance, + ) -> Result> { + let amount_in = T::HigherPrecisionBalance::from(*amount_in); + let reserve_in = T::HigherPrecisionBalance::from(*reserve_in); + let reserve_out = T::HigherPrecisionBalance::from(*reserve_out); + + if reserve_in.is_zero() || reserve_out.is_zero() { + return Err(Error::::ZeroLiquidity.into()) + } + + let amount_in_with_fee = amount_in + .checked_mul(&(T::HigherPrecisionBalance::from(1000u32) - (T::LPFee::get().into()))) + .ok_or(Error::::Overflow)?; + + let numerator = + amount_in_with_fee.checked_mul(&reserve_out).ok_or(Error::::Overflow)?; + + let denominator = reserve_in + .checked_mul(&1000u32.into()) + .ok_or(Error::::Overflow)? + .checked_add(&amount_in_with_fee) + .ok_or(Error::::Overflow)?; + + let result = numerator.checked_div(&denominator).ok_or(Error::::Overflow)?; + + result.try_into().map_err(|_| Error::::Overflow) + } + + /// Calculates amount in. + /// + /// Given an output amount of an asset and pair reserves, returns a required input amount + /// of the other asset. 
+ pub fn get_amount_in( + amount_out: &T::AssetBalance, + reserve_in: &T::AssetBalance, + reserve_out: &T::AssetBalance, + ) -> Result> { + let amount_out = T::HigherPrecisionBalance::from(*amount_out); + let reserve_in = T::HigherPrecisionBalance::from(*reserve_in); + let reserve_out = T::HigherPrecisionBalance::from(*reserve_out); + + if reserve_in.is_zero() || reserve_out.is_zero() { + Err(Error::::ZeroLiquidity.into())? + } + + if amount_out >= reserve_out { + Err(Error::::AmountOutTooHigh.into())? + } + + let numerator = reserve_in + .checked_mul(&amount_out) + .ok_or(Error::::Overflow)? + .checked_mul(&1000u32.into()) + .ok_or(Error::::Overflow)?; + + let denominator = reserve_out + .checked_sub(&amount_out) + .ok_or(Error::::Overflow)? + .checked_mul(&(T::HigherPrecisionBalance::from(1000u32) - T::LPFee::get().into())) + .ok_or(Error::::Overflow)?; + + let result = numerator + .checked_div(&denominator) + .ok_or(Error::::Overflow)? + .checked_add(&One::one()) + .ok_or(Error::::Overflow)?; + + result.try_into().map_err(|_| Error::::Overflow) + } + + /// Ensure that a `value` meets the minimum balance requirements of an `asset` class. + fn validate_minimal_amount( + value: T::AssetBalance, + asset: &T::MultiAssetId, + ) -> Result<(), ()> { + if T::MultiAssetIdConverter::is_native(asset) { + let ed = T::Currency::minimum_balance(); + ensure!( + T::HigherPrecisionBalance::from(value) >= T::HigherPrecisionBalance::from(ed), + () + ); + } else { + let MultiAssetIdConversionResult::Converted(asset_id) = T::MultiAssetIdConverter::try_convert(asset) else { + return Err(()) + }; + let minimal = T::Assets::minimum_balance(asset_id); + ensure!(value >= minimal, ()); + } + Ok(()) + } + + /// Ensure that a path is valid. 
+ fn validate_swap_path( + path: &BoundedVec, + ) -> Result<(), DispatchError> { + ensure!(path.len() >= 2, Error::::InvalidPath); + + // validate all the pools in the path are unique + let mut pools = BoundedBTreeSet::, T::MaxSwapPathLength>::new(); + for assets_pair in path.windows(2) { + if let [asset1, asset2] = assets_pair { + let pool_id = Self::get_pool_id(asset1.clone(), asset2.clone()); + let new_element = + pools.try_insert(pool_id).map_err(|_| Error::::Overflow)?; + if !new_element { + return Err(Error::::NonUniquePath.into()) + } + } + } + Ok(()) + } + + /// Returns the next pool asset id for benchmark purposes only. + #[cfg(any(test, feature = "runtime-benchmarks"))] + pub fn get_next_pool_asset_id() -> T::PoolAssetId { + NextPoolAssetId::::get() + .or(T::PoolAssetId::initial_value()) + .expect("Next pool asset ID can not be None") + } + } +} + +impl Swap for Pallet { + fn swap_exact_tokens_for_tokens( + sender: T::AccountId, + path: Vec, + amount_in: T::HigherPrecisionBalance, + amount_out_min: Option, + send_to: T::AccountId, + keep_alive: bool, + ) -> Result { + let path = path.try_into().map_err(|_| Error::::PathError)?; + let amount_out_min = amount_out_min.map(Self::convert_hpb_to_asset_balance).transpose()?; + let amount_out = Self::do_swap_exact_tokens_for_tokens( + sender, + path, + Self::convert_hpb_to_asset_balance(amount_in)?, + amount_out_min, + send_to, + keep_alive, + )?; + Ok(amount_out.into()) + } + + fn swap_tokens_for_exact_tokens( + sender: T::AccountId, + path: Vec, + amount_out: T::HigherPrecisionBalance, + amount_in_max: Option, + send_to: T::AccountId, + keep_alive: bool, + ) -> Result { + let path = path.try_into().map_err(|_| Error::::PathError)?; + let amount_in_max = amount_in_max.map(Self::convert_hpb_to_asset_balance).transpose()?; + let amount_in = Self::do_swap_tokens_for_exact_tokens( + sender, + path, + Self::convert_hpb_to_asset_balance(amount_out)?, + amount_in_max, + send_to, + keep_alive, + )?; + 
Ok(amount_in.into()) + } +} + +sp_api::decl_runtime_apis! { + /// This runtime api allows people to query the size of the liquidity pools + /// and quote prices for swaps. + pub trait AssetConversionApi where + Balance: Codec + MaybeDisplay, + AssetBalance: frame_support::traits::tokens::Balance, + AssetId: Codec + { + /// Provides a quote for [`Pallet::swap_tokens_for_exact_tokens`]. + /// + /// Note that the price may have changed by the time the transaction is executed. + /// (Use `amount_in_max` to control slippage.) + fn quote_price_tokens_for_exact_tokens(asset1: AssetId, asset2: AssetId, amount: AssetBalance, include_fee: bool) -> Option; + + /// Provides a quote for [`Pallet::swap_exact_tokens_for_tokens`]. + /// + /// Note that the price may have changed by the time the transaction is executed. + /// (Use `amount_out_min` to control slippage.) + fn quote_price_exact_tokens_for_tokens(asset1: AssetId, asset2: AssetId, amount: AssetBalance, include_fee: bool) -> Option; + + /// Returns the size of the liquidity pool for the given asset pair. + fn get_reserves(asset1: AssetId, asset2: AssetId) -> Option<(Balance, Balance)>; + } +} + +sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $); diff --git a/frame/asset-conversion/src/mock.rs b/frame/asset-conversion/src/mock.rs new file mode 100644 index 0000000000000..7fe81b814047d --- /dev/null +++ b/frame/asset-conversion/src/mock.rs @@ -0,0 +1,192 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test environment for Asset Conversion pallet. + +use super::*; +use crate as pallet_asset_conversion; + +use frame_support::{ + construct_runtime, + instances::{Instance1, Instance2}, + ord_parameter_types, parameter_types, + traits::{AsEnsureOriginWithArg, ConstU128, ConstU32, ConstU64}, + PalletId, +}; +use frame_system::{EnsureSigned, EnsureSignedBy}; +use sp_arithmetic::Permill; +use sp_core::H256; +use sp_runtime::{ + traits::{AccountIdConversion, BlakeTwo256, IdentityLookup}, + BuildStorage, +}; + +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub enum Test + { + System: frame_system, + Balances: pallet_balances, + Assets: pallet_assets::, + PoolAssets: pallet_assets::, + AssetConversion: pallet_asset_conversion, + } +); + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u128; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type Balance = 
u128; + type DustRemoval = (); + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ConstU128<100>; + type AccountStore = System; + type WeightInfo = (); + type MaxLocks = (); + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxFreezes = (); + type RuntimeHoldReason = (); + type MaxHolds = (); +} + +impl pallet_assets::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = u128; + type RemoveItemsLimit = ConstU32<1000>; + type AssetId = u32; + type AssetIdParameter = u32; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type AssetDeposit = ConstU128<1>; + type AssetAccountDeposit = ConstU128<10>; + type MetadataDepositBase = ConstU128<1>; + type MetadataDepositPerByte = ConstU128<1>; + type ApprovalDeposit = ConstU128<1>; + type StringLimit = ConstU32<50>; + type Freezer = (); + type Extra = (); + type WeightInfo = (); + type CallbackHandle = (); + pallet_assets::runtime_benchmarks_enabled! { + type BenchmarkHelper = (); + } +} + +impl pallet_assets::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = u128; + type RemoveItemsLimit = ConstU32<1000>; + type AssetId = u32; + type AssetIdParameter = u32; + type Currency = Balances; + type CreateOrigin = + AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type AssetDeposit = ConstU128<0>; + type AssetAccountDeposit = ConstU128<0>; + type MetadataDepositBase = ConstU128<0>; + type MetadataDepositPerByte = ConstU128<0>; + type ApprovalDeposit = ConstU128<0>; + type StringLimit = ConstU32<50>; + type Freezer = (); + type Extra = (); + type WeightInfo = (); + type CallbackHandle = (); + pallet_assets::runtime_benchmarks_enabled! { + type BenchmarkHelper = (); + } +} + +parameter_types! 
{ + pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); + pub storage AllowMultiAssetPools: bool = true; + pub storage LiquidityWithdrawalFee: Permill = Permill::from_percent(0); // should be non-zero if AllowMultiAssetPools is true, otherwise can be zero +} + +ord_parameter_types! { + pub const AssetConversionOrigin: u128 = AccountIdConversion::::into_account_truncating(&AssetConversionPalletId::get()); +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type AssetBalance = ::Balance; + type AssetId = u32; + type PoolAssetId = u32; + type Assets = Assets; + type PoolAssets = PoolAssets; + type PalletId = AssetConversionPalletId; + type WeightInfo = (); + type LPFee = ConstU32<3>; // means 0.3% + type PoolSetupFee = ConstU128<100>; // should be more or equal to the existential deposit + type PoolSetupFeeReceiver = AssetConversionOrigin; + type LiquidityWithdrawalFee = LiquidityWithdrawalFee; + type AllowMultiAssetPools = AllowMultiAssetPools; + type MaxSwapPathLength = ConstU32<4>; + type MintMinLiquidity = ConstU128<100>; // 100 is good enough when the main currency has 12 decimals. 
+ + type Balance = u128; + type HigherPrecisionBalance = sp_core::U256; + + type MultiAssetId = NativeOrAssetId; + type MultiAssetIdConverter = NativeOrAssetIdConverter; + + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10000), (2, 20000), (3, 30000), (4, 40000)], + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} diff --git a/frame/asset-conversion/src/tests.rs b/frame/asset-conversion/src/tests.rs new file mode 100644 index 0000000000000..80faf5363b011 --- /dev/null +++ b/frame/asset-conversion/src/tests.rs @@ -0,0 +1,1418 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::{mock::*, *}; +use frame_support::{ + assert_noop, assert_ok, + instances::Instance1, + traits::{fungible::Inspect, fungibles::InspectEnumerable, Get}, +}; +use sp_arithmetic::Permill; +use sp_runtime::{DispatchError, TokenError}; + +fn events() -> Vec> { + let result = System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let mock::RuntimeEvent::AssetConversion(inner) = e { + Some(inner) + } else { + None + } + }) + .collect(); + + System::reset_events(); + + result +} + +fn pools() -> Vec> { + let mut s: Vec<_> = Pools::::iter().map(|x| x.0).collect(); + s.sort(); + s +} + +fn assets() -> Vec> { + // if the storage would be public: + // let mut s: Vec<_> = pallet_assets::pallet::Asset::::iter().map(|x| x.0).collect(); + let mut s: Vec<_> = <::Assets>::asset_ids() + .map(|id| NativeOrAssetId::Asset(id)) + .collect(); + s.sort(); + s +} + +fn pool_assets() -> Vec { + let mut s: Vec<_> = <::PoolAssets>::asset_ids().collect(); + s.sort(); + s +} + +fn create_tokens(owner: u128, tokens: Vec>) { + for token_id in tokens { + let MultiAssetIdConversionResult::Converted(asset_id) = NativeOrAssetIdConverter::try_convert(&token_id) else { unreachable!("invalid token") }; + assert_ok!(Assets::force_create(RuntimeOrigin::root(), asset_id, owner, false, 1)); + } +} + +fn balance(owner: u128, token_id: NativeOrAssetId) -> u128 { + match token_id { + NativeOrAssetId::Native => <::Currency>::free_balance(owner), + NativeOrAssetId::Asset(token_id) => <::Assets>::balance(token_id, owner), + } +} + +fn pool_balance(owner: u128, token_id: u32) -> u128 { + <::PoolAssets>::balance(token_id, owner) +} + +fn get_ed() -> u128 { + <::Currency>::minimum_balance() +} + +macro_rules! 
bvec { + ($( $x:tt )*) => { + vec![$( $x )*].try_into().unwrap() + } +} + +#[test] +fn check_pool_accounts_dont_collide() { + use std::collections::HashSet; + let mut map = HashSet::new(); + + for i in 0..1_000_000u32 { + let account = AssetConversion::get_pool_account(&( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(i), + )); + if map.contains(&account) { + panic!("Collision at {}", i); + } + map.insert(account); + } +} + +#[test] +fn check_max_numbers() { + new_test_ext().execute_with(|| { + assert_eq!(AssetConversion::quote(&3u128, &u128::MAX, &u128::MAX).ok().unwrap(), 3); + assert!(AssetConversion::quote(&u128::MAX, &3u128, &u128::MAX).is_err()); + assert_eq!(AssetConversion::quote(&u128::MAX, &u128::MAX, &1u128).ok().unwrap(), 1); + + assert_eq!( + AssetConversion::get_amount_out(&100u128, &u128::MAX, &u128::MAX).ok().unwrap(), + 99 + ); + assert_eq!( + AssetConversion::get_amount_in(&100u128, &u128::MAX, &u128::MAX).ok().unwrap(), + 101 + ); + }); +} + +#[test] +fn can_create_pool() { + new_test_ext().execute_with(|| { + let asset_account_deposit: u128 = + >::AssetAccountDeposit::get(); + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + let pool_id = (token_1, token_2); + + create_tokens(user, vec![token_2]); + + let lp_token = AssetConversion::get_next_pool_asset_id(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 1000)); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_1)); + + let setup_fee = <::PoolSetupFee as Get<::Balance>>::get(); + let pool_account = <::PoolSetupFeeReceiver as Get>::get(); + assert_eq!( + balance(user, NativeOrAssetId::Native), + 1000 - (setup_fee + asset_account_deposit) + ); + assert_eq!(balance(pool_account, NativeOrAssetId::Native), setup_fee); + assert_eq!(lp_token + 1, AssetConversion::get_next_pool_asset_id()); + + assert_eq!( + events(), + [Event::::PoolCreated { + creator: user, + pool_id, + pool_account: 
AssetConversion::get_pool_account(&pool_id), + lp_token + }] + ); + assert_eq!(pools(), vec![pool_id]); + assert_eq!(assets(), vec![token_2]); + assert_eq!(pool_assets(), vec![lp_token]); + + assert_noop!( + AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_1), + Error::::EqualAssets + ); + assert_noop!( + AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_2), + Error::::EqualAssets + ); + + // validate we can create Asset(1)/Asset(2) pool + let token_1 = NativeOrAssetId::Asset(1); + create_tokens(user, vec![token_1]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + // validate we can force the first asset to be the Native currency only + AllowMultiAssetPools::set(&false); + let token_1 = NativeOrAssetId::Asset(3); + assert_noop!( + AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2), + Error::::PoolMustContainNativeCurrency + ); + }); +} + +#[test] +fn create_same_pool_twice_should_fail() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + + create_tokens(user, vec![token_2]); + + let lp_token = AssetConversion::get_next_pool_asset_id(); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_1)); + let expected_free = lp_token + 1; + assert_eq!(expected_free, AssetConversion::get_next_pool_asset_id()); + + assert_noop!( + AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_1), + Error::::PoolExists + ); + assert_eq!(expected_free, AssetConversion::get_next_pool_asset_id()); + + // Try switching the same tokens around: + assert_noop!( + AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2), + Error::::PoolExists + ); + assert_eq!(expected_free, AssetConversion::get_next_pool_asset_id()); + }); +} + +#[test] +fn different_pools_should_have_different_lp_tokens() { + 
new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + let token_3 = NativeOrAssetId::Asset(3); + let pool_id_1_2 = (token_1, token_2); + let pool_id_1_3 = (token_1, token_3); + + create_tokens(user, vec![token_2, token_3]); + + let lp_token2_1 = AssetConversion::get_next_pool_asset_id(); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_1)); + let lp_token3_1 = AssetConversion::get_next_pool_asset_id(); + + assert_eq!( + events(), + [Event::::PoolCreated { + creator: user, + pool_id: pool_id_1_2, + pool_account: AssetConversion::get_pool_account(&pool_id_1_2), + lp_token: lp_token2_1 + }] + ); + + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_3, token_1)); + assert_eq!( + events(), + [Event::::PoolCreated { + creator: user, + pool_id: pool_id_1_3, + pool_account: AssetConversion::get_pool_account(&pool_id_1_3), + lp_token: lp_token3_1, + }] + ); + + assert_ne!(lp_token2_1, lp_token3_1); + }); +} + +#[test] +fn can_add_liquidity() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + let token_3 = NativeOrAssetId::Asset(3); + + create_tokens(user, vec![token_2, token_3]); + let lp_token1 = AssetConversion::get_next_pool_asset_id(); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + let lp_token2 = AssetConversion::get_next_pool_asset_id(); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_3)); + + let ed = get_ed(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 * 2 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 3, user, 1000)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + 10000, + 10, + 
10000, + 10, + user, + )); + + let pool_id = (token_1, token_2); + assert!(events().contains(&Event::::LiquidityAdded { + who: user, + mint_to: user, + pool_id, + amount1_provided: 10000, + amount2_provided: 10, + lp_token: lp_token1, + lp_token_minted: 216, + })); + let pallet_account = AssetConversion::get_pool_account(&pool_id); + assert_eq!(balance(pallet_account, token_1), 10000); + assert_eq!(balance(pallet_account, token_2), 10); + assert_eq!(balance(user, token_1), 10000 + ed); + assert_eq!(balance(user, token_2), 1000 - 10); + assert_eq!(pool_balance(user, lp_token1), 216); + + // try to pass the non-native - native assets, the result should be the same + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_3, + token_1, + 10, + 10000, + 10, + 10000, + user, + )); + + let pool_id = (token_1, token_3); + assert!(events().contains(&Event::::LiquidityAdded { + who: user, + mint_to: user, + pool_id, + amount1_provided: 10000, + amount2_provided: 10, + lp_token: lp_token2, + lp_token_minted: 216, + })); + let pallet_account = AssetConversion::get_pool_account(&pool_id); + assert_eq!(balance(pallet_account, token_1), 10000); + assert_eq!(balance(pallet_account, token_3), 10); + assert_eq!(balance(user, token_1), ed); + assert_eq!(balance(user, token_3), 1000 - 10); + assert_eq!(pool_balance(user, lp_token2), 216); + }); +} + +#[test] +fn add_tiny_liquidity_leads_to_insufficient_liquidity_minted_error() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + + create_tokens(user, vec![token_2]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 1000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + assert_noop!( + AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + 1, + 1, + 1, + 
1, + user + ), + Error::::AmountOneLessThanMinimal + ); + + assert_noop!( + AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + get_ed(), + 1, + 1, + 1, + user + ), + Error::::InsufficientLiquidityMinted + ); + }); +} + +#[test] +fn add_tiny_liquidity_directly_to_pool_address() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + let token_3 = NativeOrAssetId::Asset(3); + + create_tokens(user, vec![token_2, token_3]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_3)); + + let ed = get_ed(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 * 2 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 3, user, 1000)); + + // check we're still able to add the liquidity even when the pool already has some token_1 + let pallet_account = AssetConversion::get_pool_account(&(token_1, token_2)); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), pallet_account, 1000)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + 10000, + 10, + 10000, + 10, + user, + )); + + // check the same but for token_3 (non-native token) + let pallet_account = AssetConversion::get_pool_account(&(token_1, token_3)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, pallet_account, 1)); + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_3, + 10000, + 10, + 10000, + 10, + user, + )); + }); +} + +#[test] +fn can_remove_liquidity() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + let pool_id = (token_1, token_2); + + create_tokens(user, 
vec![token_2]); + let lp_token = AssetConversion::get_next_pool_asset_id(); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000000000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 100000)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + 1000000000, + 100000, + 1000000000, + 100000, + user, + )); + + let total_lp_received = pool_balance(user, lp_token); + LiquidityWithdrawalFee::set(&Permill::from_percent(10)); + + assert_ok!(AssetConversion::remove_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + total_lp_received, + 0, + 0, + user, + )); + + assert!(events().contains(&Event::::LiquidityRemoved { + who: user, + withdraw_to: user, + pool_id, + amount1: 899991000, + amount2: 89999, + lp_token, + lp_token_burned: total_lp_received, + withdrawal_fee: ::LiquidityWithdrawalFee::get() + })); + + let pool_account = AssetConversion::get_pool_account(&pool_id); + assert_eq!(balance(pool_account, token_1), 100009000); + assert_eq!(balance(pool_account, token_2), 10001); + assert_eq!(pool_balance(pool_account, lp_token), 100); + + assert_eq!(balance(user, token_1), 10000000000 - 1000000000 + 899991000); + assert_eq!(balance(user, token_2), 89999); + assert_eq!(pool_balance(user, lp_token), 0); + }); +} + +#[test] +fn can_not_redeem_more_lp_tokens_than_were_minted() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + let lp_token = AssetConversion::get_next_pool_asset_id(); + + create_tokens(user, vec![token_2]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 + get_ed())); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + 
assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + 10000, + 10, + 10000, + 10, + user, + )); + + // Only 216 lp_tokens_minted + assert_eq!(pool_balance(user, lp_token), 216); + + assert_noop!( + AssetConversion::remove_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + 216 + 1, // Try and redeem 10 lp tokens while only 9 minted. + 0, + 0, + user, + ), + DispatchError::Token(TokenError::FundsUnavailable) + ); + }); +} + +#[test] +fn can_quote_price() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + + create_tokens(user, vec![token_2]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 100000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + 10000, + 200, + 1, + 1, + user, + )); + + assert_eq!( + AssetConversion::quote_price_exact_tokens_for_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(2), + 3000, + false, + ), + Some(60) + ); + // Check it still gives same price: + // (if the above accidentally exchanged then it would not give same quote as before) + assert_eq!( + AssetConversion::quote_price_exact_tokens_for_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(2), + 3000, + false, + ), + Some(60) + ); + + // Check inverse: + assert_eq!( + AssetConversion::quote_price_exact_tokens_for_tokens( + NativeOrAssetId::Asset(2), + NativeOrAssetId::Native, + 60, + false, + ), + Some(3000) + ); + }); +} + +#[test] +fn can_swap_with_native() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + let pool_id = (token_1, token_2); + + create_tokens(user, vec![token_2]); + 
assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + let ed = get_ed(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + let liquidity1 = 10000; + let liquidity2 = 200; + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + liquidity1, + liquidity2, + 1, + 1, + user, + )); + + let input_amount = 100; + let expect_receive = + AssetConversion::get_amount_out(&input_amount, &liquidity2, &liquidity1) + .ok() + .unwrap(); + + assert_ok!(AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![token_2, token_1], + input_amount, + 1, + user, + false, + )); + + let pallet_account = AssetConversion::get_pool_account(&pool_id); + assert_eq!(balance(user, token_1), expect_receive + ed); + assert_eq!(balance(user, token_2), 1000 - liquidity2 - input_amount); + assert_eq!(balance(pallet_account, token_1), liquidity1 - expect_receive); + assert_eq!(balance(pallet_account, token_2), liquidity2 + input_amount); + }); +} + +#[test] +fn can_swap_with_realistic_values() { + new_test_ext().execute_with(|| { + let user = 1; + let dot = NativeOrAssetId::Native; + let usd = NativeOrAssetId::Asset(2); + create_tokens(user, vec![usd]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), dot, usd)); + + const UNIT: u128 = 1_000_000_000; + + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 300_000 * UNIT)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1_100_000 * UNIT)); + + let liquidity_dot = 200_000 * UNIT; // ratio for a 5$ price + let liquidity_usd = 1_000_000 * UNIT; + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + dot, + usd, + liquidity_dot, + liquidity_usd, + 1, + 1, + user, + )); + + let input_amount = 10 * UNIT; // usd + + 
assert_ok!(AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![usd, dot], + input_amount, + 1, + user, + false, + )); + + assert!(events().contains(&Event::::SwapExecuted { + who: user, + send_to: user, + path: bvec![usd, dot], + amount_in: 10 * UNIT, // usd + amount_out: 1_993_980_120, // About 2 dot after div by UNIT. + })); + }); +} + +#[test] +fn can_not_swap_in_pool_with_no_liquidity_added_yet() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + + create_tokens(user, vec![token_2]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + // Check can't swap an empty pool + assert_noop!( + AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![token_2, token_1], + 10, + 1, + user, + false, + ), + Error::::PoolNotFound + ); + }); +} + +#[test] +fn check_no_panic_when_try_swap_close_to_empty_pool() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + let pool_id = (token_1, token_2); + let lp_token = AssetConversion::get_next_pool_asset_id(); + + create_tokens(user, vec![token_2]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + let ed = get_ed(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + let liquidity1 = 10000; + let liquidity2 = 200; + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + liquidity1, + liquidity2, + 1, + 1, + user, + )); + + let lp_token_minted = pool_balance(user, lp_token); + assert!(events().contains(&Event::::LiquidityAdded { + who: user, + mint_to: user, + pool_id, + amount1_provided: liquidity1, + amount2_provided: liquidity2, + lp_token, + lp_token_minted, + 
})); + + let pallet_account = AssetConversion::get_pool_account(&pool_id); + assert_eq!(balance(pallet_account, token_1), liquidity1); + assert_eq!(balance(pallet_account, token_2), liquidity2); + + assert_ok!(AssetConversion::remove_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + lp_token_minted, + 1, + 1, + user, + )); + + // Now, the pool should exist but be almost empty. + // Let's try and drain it. + assert_eq!(balance(pallet_account, token_1), 708); + assert_eq!(balance(pallet_account, token_2), 15); + + // validate the reserve should always stay above the ED + assert_noop!( + AssetConversion::swap_tokens_for_exact_tokens( + RuntimeOrigin::signed(user), + bvec![token_2, token_1], + 708 - ed + 1, // amount_out + 500, // amount_in_max + user, + false, + ), + Error::::ReserveLeftLessThanMinimal + ); + + assert_ok!(AssetConversion::swap_tokens_for_exact_tokens( + RuntimeOrigin::signed(user), + bvec![token_2, token_1], + 608, // amount_out + 500, // amount_in_max + user, + false, + )); + + let token_1_left = balance(pallet_account, token_1); + let token_2_left = balance(pallet_account, token_2); + assert_eq!(token_1_left, 708 - 608); + + // The price for the last tokens should be very high + assert_eq!( + AssetConversion::get_amount_in(&(token_1_left - 1), &token_2_left, &token_1_left) + .ok() + .unwrap(), + 10625 + ); + + assert_noop!( + AssetConversion::swap_tokens_for_exact_tokens( + RuntimeOrigin::signed(user), + bvec![token_2, token_1], + token_1_left - 1, // amount_out + 1000, // amount_in_max + user, + false, + ), + Error::::ProvidedMaximumNotSufficientForSwap + ); + + // Try to swap what's left in the pool + assert_noop!( + AssetConversion::swap_tokens_for_exact_tokens( + RuntimeOrigin::signed(user), + bvec![token_2, token_1], + token_1_left, // amount_out + 1000, // amount_in_max + user, + false, + ), + Error::::AmountOutTooHigh + ); + }); +} + +#[test] +fn swap_should_not_work_if_too_much_slippage() { + new_test_ext().execute_with(|| { 
+ let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + + create_tokens(user, vec![token_2]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 + get_ed())); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + let liquidity1 = 10000; + let liquidity2 = 200; + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + liquidity1, + liquidity2, + 1, + 1, + user, + )); + + let exchange_amount = 100; + + assert_noop!( + AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![token_2, token_1], + exchange_amount, // amount_in + 4000, // amount_out_min + user, + false, + ), + Error::::ProvidedMinimumNotSufficientForSwap + ); + }); +} + +#[test] +fn can_swap_tokens_for_exact_tokens() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + let pool_id = (token_1, token_2); + + create_tokens(user, vec![token_2]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + let ed = get_ed(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 20000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + let pallet_account = AssetConversion::get_pool_account(&pool_id); + let before1 = balance(pallet_account, token_1) + balance(user, token_1); + let before2 = balance(pallet_account, token_2) + balance(user, token_2); + + let liquidity1 = 10000; + let liquidity2 = 200; + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + liquidity1, + liquidity2, + 1, + 1, + user, + )); + + let exchange_out = 50; + let expect_in = AssetConversion::get_amount_in(&exchange_out, &liquidity1, &liquidity2) + .ok() + .unwrap(); + + 
assert_ok!(AssetConversion::swap_tokens_for_exact_tokens( + RuntimeOrigin::signed(user), + bvec![token_1, token_2], + exchange_out, // amount_out + 3500, // amount_in_max + user, + true, + )); + + assert_eq!(balance(user, token_1), 10000 + ed - expect_in); + assert_eq!(balance(user, token_2), 1000 - liquidity2 + exchange_out); + assert_eq!(balance(pallet_account, token_1), liquidity1 + expect_in); + assert_eq!(balance(pallet_account, token_2), liquidity2 - exchange_out); + + // check invariants: + + // native and asset totals should be preserved. + assert_eq!(before1, balance(pallet_account, token_1) + balance(user, token_1)); + assert_eq!(before2, balance(pallet_account, token_2) + balance(user, token_2)); + }); +} + +#[test] +fn can_swap_tokens_for_exact_tokens_when_not_liquidity_provider() { + new_test_ext().execute_with(|| { + let user = 1; + let user2 = 2; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + let pool_id = (token_1, token_2); + let lp_token = AssetConversion::get_next_pool_asset_id(); + + create_tokens(user2, vec![token_2]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user2), token_1, token_2)); + + let ed = get_ed(); + let base1 = 10000; + let base2 = 1000; + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, base1 + ed)); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user2, base1 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user2), 2, user2, base2)); + + let pallet_account = AssetConversion::get_pool_account(&pool_id); + let before1 = + balance(pallet_account, token_1) + balance(user, token_1) + balance(user2, token_1); + let before2 = + balance(pallet_account, token_2) + balance(user, token_2) + balance(user2, token_2); + + let liquidity1 = 10000; + let liquidity2 = 200; + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user2), + token_1, + token_2, + liquidity1, + liquidity2, + 1, + 1, + user2, + )); + + 
assert_eq!(balance(user, token_1), base1 + ed); + assert_eq!(balance(user, token_2), 0); + + let exchange_out = 50; + let expect_in = AssetConversion::get_amount_in(&exchange_out, &liquidity1, &liquidity2) + .ok() + .unwrap(); + + assert_ok!(AssetConversion::swap_tokens_for_exact_tokens( + RuntimeOrigin::signed(user), + bvec![token_1, token_2], + exchange_out, // amount_out + 3500, // amount_in_max + user, + true, + )); + + assert_eq!(balance(user, token_1), base1 + ed - expect_in); + assert_eq!(balance(pallet_account, token_1), liquidity1 + expect_in); + assert_eq!(balance(user, token_2), exchange_out); + assert_eq!(balance(pallet_account, token_2), liquidity2 - exchange_out); + + // check invariants: + + // native and asset totals should be preserved. + assert_eq!( + before1, + balance(pallet_account, token_1) + balance(user, token_1) + balance(user2, token_1) + ); + assert_eq!( + before2, + balance(pallet_account, token_2) + balance(user, token_2) + balance(user2, token_2) + ); + + let lp_token_minted = pool_balance(user2, lp_token); + assert_eq!(lp_token_minted, 1314); + + assert_ok!(AssetConversion::remove_liquidity( + RuntimeOrigin::signed(user2), + token_1, + token_2, + lp_token_minted, + 0, + 0, + user2, + )); + }); +} + +#[test] +fn swap_when_existential_deposit_would_cause_reaping_but_keep_alive_set() { + new_test_ext().execute_with(|| { + let user = 1; + let user2 = 2; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + + create_tokens(user2, vec![token_2]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user2), token_1, token_2)); + + let ed = get_ed(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 101)); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user2, 10000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user2), 2, user2, 1000)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user2), + token_1, + token_2, + 10000, + 200, + 
1, + 1, + user2, + )); + + assert_noop!( + AssetConversion::swap_tokens_for_exact_tokens( + RuntimeOrigin::signed(user), + bvec![token_1, token_2], + 1, // amount_out + 101, // amount_in_max + user, + true, + ), + DispatchError::Token(TokenError::NotExpendable) + ); + + assert_noop!( + AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![token_1, token_2], + 51, // amount_in + 1, // amount_out_min + user, + true, + ), + DispatchError::Token(TokenError::NotExpendable) + ); + }); +} + +#[test] +fn swap_tokens_for_exact_tokens_should_not_work_if_too_much_slippage() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + + create_tokens(user, vec![token_2]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 20000 + get_ed())); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + let liquidity1 = 10000; + let liquidity2 = 200; + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + liquidity1, + liquidity2, + 1, + 1, + user, + )); + + let exchange_out = 1; + + assert_noop!( + AssetConversion::swap_tokens_for_exact_tokens( + RuntimeOrigin::signed(user), + bvec![token_1, token_2], + exchange_out, // amount_out + 50, // amount_in_max just greater than slippage. 
+ user, + true + ), + Error::::ProvidedMaximumNotSufficientForSwap + ); + }); +} + +#[test] +fn swap_exact_tokens_for_tokens_in_multi_hops() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + let token_3 = NativeOrAssetId::Asset(3); + + create_tokens(user, vec![token_2, token_3]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_3)); + + let ed = get_ed(); + let base1 = 10000; + let base2 = 10000; + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, base1 * 2 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, base2)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 3, user, base2)); + + let liquidity1 = 10000; + let liquidity2 = 200; + let liquidity3 = 2000; + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + liquidity1, + liquidity2, + 1, + 1, + user, + )); + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_2, + token_3, + liquidity2, + liquidity3, + 1, + 1, + user, + )); + + let input_amount = 500; + let expect_out2 = AssetConversion::get_amount_out(&input_amount, &liquidity1, &liquidity2) + .ok() + .unwrap(); + let expect_out3 = AssetConversion::get_amount_out(&expect_out2, &liquidity2, &liquidity3) + .ok() + .unwrap(); + + assert_noop!( + AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![token_1], + input_amount, + 80, + user, + true, + ), + Error::::InvalidPath + ); + + assert_noop!( + AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![token_1, token_2, token_3, token_2], + input_amount, + 80, + user, + true, + ), + Error::::NonUniquePath + ); + + assert_ok!(AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![token_1, 
token_2, token_3], + input_amount, // amount_in + 80, // amount_out_min + user, + true, + )); + + let pool_id1 = (token_1, token_2); + let pool_id2 = (token_2, token_3); + let pallet_account1 = AssetConversion::get_pool_account(&pool_id1); + let pallet_account2 = AssetConversion::get_pool_account(&pool_id2); + + assert_eq!(balance(user, token_1), base1 + ed - input_amount); + assert_eq!(balance(pallet_account1, token_1), liquidity1 + input_amount); + assert_eq!(balance(pallet_account1, token_2), liquidity2 - expect_out2); + assert_eq!(balance(pallet_account2, token_2), liquidity2 + expect_out2); + assert_eq!(balance(pallet_account2, token_3), liquidity3 - expect_out3); + assert_eq!(balance(user, token_3), 10000 - liquidity3 + expect_out3); + }); +} + +#[test] +fn swap_tokens_for_exact_tokens_in_multi_hops() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + let token_3 = NativeOrAssetId::Asset(3); + + create_tokens(user, vec![token_2, token_3]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_2, token_3)); + + let ed = get_ed(); + let base1 = 10000; + let base2 = 10000; + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, base1 * 2 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, base2)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 3, user, base2)); + + let liquidity1 = 10000; + let liquidity2 = 200; + let liquidity3 = 2000; + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + liquidity1, + liquidity2, + 1, + 1, + user, + )); + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_2, + token_3, + liquidity2, + liquidity3, + 1, + 1, + user, + )); + + let exchange_out3 = 100; + let expect_in2 = 
AssetConversion::get_amount_in(&exchange_out3, &liquidity2, &liquidity3) + .ok() + .unwrap(); + let expect_in1 = AssetConversion::get_amount_in(&expect_in2, &liquidity1, &liquidity2) + .ok() + .unwrap(); + + assert_ok!(AssetConversion::swap_tokens_for_exact_tokens( + RuntimeOrigin::signed(user), + bvec![token_1, token_2, token_3], + exchange_out3, // amount_out + 1000, // amount_in_max + user, + true, + )); + + let pool_id1 = (token_1, token_2); + let pool_id2 = (token_2, token_3); + let pallet_account1 = AssetConversion::get_pool_account(&pool_id1); + let pallet_account2 = AssetConversion::get_pool_account(&pool_id2); + + assert_eq!(balance(user, token_1), base1 + ed - expect_in1); + assert_eq!(balance(pallet_account1, token_1), liquidity1 + expect_in1); + assert_eq!(balance(pallet_account1, token_2), liquidity2 - expect_in2); + assert_eq!(balance(pallet_account2, token_2), liquidity2 + expect_in2); + assert_eq!(balance(pallet_account2, token_3), liquidity3 - exchange_out3); + assert_eq!(balance(user, token_3), 10000 - liquidity3 + exchange_out3); + }); +} + +#[test] +fn can_not_swap_same_asset() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrAssetId::Asset(1); + + create_tokens(user, vec![token_1]); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 1, user, 1000)); + + let liquidity1 = 1000; + let liquidity2 = 20; + assert_noop!( + AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_1, + liquidity1, + liquidity2, + 1, + 1, + user, + ), + Error::::PoolNotFound + ); + + let exchange_amount = 10; + assert_noop!( + AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![token_1, token_1], + exchange_amount, + 1, + user, + true, + ), + Error::::PoolNotFound + ); + + assert_noop!( + AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user), + bvec![NativeOrAssetId::Native, NativeOrAssetId::Native], + exchange_amount, + 1, + user, + true, + ), + 
Error::::PoolNotFound + ); + }); +} + +#[test] +fn validate_pool_id_sorting() { + new_test_ext().execute_with(|| { + use crate::NativeOrAssetId::{Asset, Native}; + assert_eq!(AssetConversion::get_pool_id(Native, Asset(2)), (Native, Asset(2))); + assert_eq!(AssetConversion::get_pool_id(Asset(2), Native), (Native, Asset(2))); + assert_eq!(AssetConversion::get_pool_id(Native, Native), (Native, Native)); + assert_eq!(AssetConversion::get_pool_id(Asset(2), Asset(1)), (Asset(1), Asset(2))); + assert!(Asset(2) > Asset(1)); + assert!(Asset(1) <= Asset(1)); + assert_eq!(Asset(1), Asset(1)); + assert_eq!(Native::, Native::); + assert!(Native < Asset(1)); + }); +} + +#[test] +fn cannot_block_pool_creation() { + new_test_ext().execute_with(|| { + // User 1 is the pool creator + let user = 1; + // User 2 is the attacker + let attacker = 2; + + let ed = get_ed(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), attacker, 10000 + ed)); + + // The target pool the user wants to create is Native <=> Asset(2) + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + + // Attacker computes the still non-existing pool account for the target pair + let pool_account = + AssetConversion::get_pool_account(&AssetConversion::get_pool_id(token_2, token_1)); + // And transfers the ED to that pool account + assert_ok!(Balances::transfer(RuntimeOrigin::signed(attacker), pool_account, ed)); + // Then, the attacker creates 14 tokens and sends one of each to the pool account + for i in 10..25 { + create_tokens(attacker, vec![NativeOrAssetId::Asset(i)]); + assert_ok!(Assets::mint(RuntimeOrigin::signed(attacker), i, attacker, 1000)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(attacker), i, pool_account, 1)); + } + + // User can still create the pool + create_tokens(user, vec![token_2]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + // User has to transfer one Asset(2) token to the pool account 
(otherwise add_liquidity will + // fail with `AssetTwoDepositDidNotMeetMinimum`) + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 10000)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(user), 2, pool_account, 1)); + + // add_liquidity shouldn't fail because of the number of consumers + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + 10000, + 100, + 10000, + 10, + user, + )); + }); +} diff --git a/frame/asset-conversion/src/types.rs b/frame/asset-conversion/src/types.rs new file mode 100644 index 0000000000000..7cd9743ff04b8 --- /dev/null +++ b/frame/asset-conversion/src/types.rs @@ -0,0 +1,184 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; + +use codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; +use sp_std::{cmp::Ordering, marker::PhantomData}; + +pub(super) type PoolIdOf = (::MultiAssetId, ::MultiAssetId); + +/// Stores the lp_token asset id a particular pool has been assigned. +#[derive(Decode, Encode, Default, PartialEq, Eq, MaxEncodedLen, TypeInfo)] +pub struct PoolInfo { + /// Liquidity pool asset + pub lp_token: PoolAssetId, +} + +/// A trait that converts between a MultiAssetId and either the native currency or an AssetId. 
+pub trait MultiAssetIdConverter { + /// Returns the MultiAssetId representing the native currency of the chain. + fn get_native() -> MultiAssetId; + + /// Returns true if the given MultiAssetId is the native currency. + fn is_native(asset: &MultiAssetId) -> bool; + + /// If it's not native, returns the AssetId for the given MultiAssetId. + fn try_convert(asset: &MultiAssetId) -> MultiAssetIdConversionResult; +} + +/// Result of `MultiAssetIdConverter::try_convert`. +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +pub enum MultiAssetIdConversionResult { + /// Input asset is successfully converted. Means that converted asset is supported. + Converted(AssetId), + /// Means that input asset is the chain's native asset, if it has one, so no conversion (see + /// `MultiAssetIdConverter::get_native`). + Native, + /// Means input asset is not supported for pool. + Unsupported(MultiAssetId), +} + +/// Benchmark Helper +#[cfg(feature = "runtime-benchmarks")] +pub trait BenchmarkHelper { + /// Returns an `AssetId` from a given integer. + fn asset_id(asset_id: u32) -> AssetId; + + /// Returns a `MultiAssetId` from a given integer. + fn multiasset_id(asset_id: u32) -> MultiAssetId; +} + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelper for () +where + AssetId: From, + MultiAssetId: From, +{ + fn asset_id(asset_id: u32) -> AssetId { + asset_id.into() + } + + fn multiasset_id(asset_id: u32) -> MultiAssetId { + asset_id.into() + } +} + +/// Trait for providing methods to swap between the various asset classes. +pub trait Swap { + /// Swap exactly `amount_in` of asset `path[0]` for asset `path[1]`. + /// If an `amount_out_min` is specified, it will return an error if it is unable to acquire + /// the amount desired. + /// + /// Withdraws the `path[0]` asset from `sender`, deposits the `path[1]` asset to `send_to`, + /// respecting `keep_alive`. + /// + /// If successful, returns the amount of `path[1]` acquired for the `amount_in`. 
+ fn swap_exact_tokens_for_tokens( + sender: AccountId, + path: Vec, + amount_in: Balance, + amount_out_min: Option, + send_to: AccountId, + keep_alive: bool, + ) -> Result; + + /// Take the `path[0]` asset and swap some amount for `amount_out` of the `path[1]`. If an + /// `amount_in_max` is specified, it will return an error if acquiring `amount_out` would be + /// too costly. + /// + /// Withdraws `path[0]` asset from `sender`, deposits `path[1]` asset to `send_to`, + /// respecting `keep_alive`. + /// + /// If successful returns the amount of the `path[0]` taken to provide `path[1]`. + fn swap_tokens_for_exact_tokens( + sender: AccountId, + path: Vec, + amount_out: Balance, + amount_in_max: Option, + send_to: AccountId, + keep_alive: bool, + ) -> Result; +} + +/// An implementation of MultiAssetId that can be either Native or an asset. +#[derive(Decode, Encode, Default, MaxEncodedLen, TypeInfo, Clone, Copy, Debug)] +pub enum NativeOrAssetId +where + AssetId: Ord, +{ + /// Native asset. For example, on the Polkadot Asset Hub this would be DOT. + #[default] + Native, + /// A non-native asset id. + Asset(AssetId), +} + +impl From for NativeOrAssetId { + fn from(asset: AssetId) -> Self { + Self::Asset(asset) + } +} + +impl Ord for NativeOrAssetId { + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + (Self::Native, Self::Native) => Ordering::Equal, + (Self::Native, Self::Asset(_)) => Ordering::Less, + (Self::Asset(_), Self::Native) => Ordering::Greater, + (Self::Asset(id1), Self::Asset(id2)) => ::cmp(id1, id2), + } + } +} +impl PartialOrd for NativeOrAssetId { + fn partial_cmp(&self, other: &Self) -> Option { + Some(::cmp(self, other)) + } +} +impl PartialEq for NativeOrAssetId { + fn eq(&self, other: &Self) -> bool { + self.cmp(other) == Ordering::Equal + } +} +impl Eq for NativeOrAssetId {} + +/// Converts between a MultiAssetId and an AssetId (or the native currency). 
+pub struct NativeOrAssetIdConverter { + _phantom: PhantomData, +} + +impl MultiAssetIdConverter, AssetId> + for NativeOrAssetIdConverter +{ + fn get_native() -> NativeOrAssetId { + NativeOrAssetId::Native + } + + fn is_native(asset: &NativeOrAssetId) -> bool { + *asset == Self::get_native() + } + + fn try_convert( + asset: &NativeOrAssetId, + ) -> MultiAssetIdConversionResult, AssetId> { + match asset { + NativeOrAssetId::Asset(asset) => MultiAssetIdConversionResult::Converted(asset.clone()), + NativeOrAssetId::Native => MultiAssetIdConversionResult::Native, + } + } +} diff --git a/frame/asset-conversion/src/weights.rs b/frame/asset-conversion/src/weights.rs new file mode 100644 index 0000000000000..550878ba0be96 --- /dev/null +++ b/frame/asset-conversion/src/weights.rs @@ -0,0 +1,256 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_asset_conversion +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-gghbxkbs-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// target/production/substrate +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json +// --pallet=pallet_asset_conversion +// --chain=dev +// --header=./HEADER-APACHE2 +// --output=./frame/asset-conversion/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for pallet_asset_conversion. +pub trait WeightInfo { + fn create_pool() -> Weight; + fn add_liquidity() -> Weight; + fn remove_liquidity() -> Weight; + fn swap_exact_tokens_for_tokens() -> Weight; + fn swap_tokens_for_exact_tokens() -> Weight; +} + +/// Weights for pallet_asset_conversion using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `AssetConversion::Pools` (r:1 w:1) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `AssetConversion::NextPoolAssetId` (r:1 w:1) + /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn create_pool() -> Weight { + // Proof Size summary in bytes: + // Measured: `729` + // Estimated: `6196` + // Minimum execution time: 131_688_000 picoseconds. 
+ Weight::from_parts(134_092_000, 6196) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(8_u64)) + } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn add_liquidity() -> Weight { + // Proof Size summary in bytes: + // Measured: `1382` + // Estimated: `6208` + // Minimum execution time: 157_310_000 picoseconds. 
+ Weight::from_parts(161_547_000, 6208) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) + } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn remove_liquidity() -> Weight { + // Proof Size summary in bytes: + // Measured: `1371` + // Estimated: `6208` + // Minimum execution time: 142_769_000 picoseconds. 
+ Weight::from_parts(145_139_000, 6208) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:3 w:3) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:6 w:6) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn swap_exact_tokens_for_tokens() -> Weight { + // Proof Size summary in bytes: + // Measured: `1738` + // Estimated: `16644` + // Minimum execution time: 213_186_000 picoseconds. + Weight::from_parts(217_471_000, 16644) + .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().writes(10_u64)) + } + /// Storage: `Assets::Asset` (r:3 w:3) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:6 w:6) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn swap_tokens_for_exact_tokens() -> Weight { + // Proof Size summary in bytes: + // Measured: `1738` + // Estimated: `16644` + // Minimum execution time: 213_793_000 picoseconds. + Weight::from_parts(218_584_000, 16644) + .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().writes(10_u64)) + } +} + +// For backwards compatibility and tests. 
+impl WeightInfo for () { + /// Storage: `AssetConversion::Pools` (r:1 w:1) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `AssetConversion::NextPoolAssetId` (r:1 w:1) + /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn create_pool() -> Weight { + // Proof Size summary in bytes: + // Measured: `729` + // Estimated: `6196` + // Minimum execution time: 131_688_000 picoseconds. 
+ Weight::from_parts(134_092_000, 6196) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(8_u64)) + } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn add_liquidity() -> Weight { + // Proof Size summary in bytes: + // Measured: `1382` + // Estimated: `6208` + // Minimum execution time: 157_310_000 picoseconds. 
+ Weight::from_parts(161_547_000, 6208) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) + } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn remove_liquidity() -> Weight { + // Proof Size summary in bytes: + // Measured: `1371` + // Estimated: `6208` + // Minimum execution time: 142_769_000 picoseconds. 
+ Weight::from_parts(145_139_000, 6208) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:3 w:3) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:6 w:6) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn swap_exact_tokens_for_tokens() -> Weight { + // Proof Size summary in bytes: + // Measured: `1738` + // Estimated: `16644` + // Minimum execution time: 213_186_000 picoseconds. + Weight::from_parts(217_471_000, 16644) + .saturating_add(RocksDbWeight::get().reads(10_u64)) + .saturating_add(RocksDbWeight::get().writes(10_u64)) + } + /// Storage: `Assets::Asset` (r:3 w:3) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:6 w:6) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn swap_tokens_for_exact_tokens() -> Weight { + // Proof Size summary in bytes: + // Measured: `1738` + // Estimated: `16644` + // Minimum execution time: 213_793_000 picoseconds. 
+ Weight::from_parts(218_584_000, 16644) + .saturating_add(RocksDbWeight::get().reads(10_u64)) + .saturating_add(RocksDbWeight::get().writes(10_u64)) + } +} diff --git a/frame/asset-rate/Cargo.toml b/frame/asset-rate/Cargo.toml index 36aabb12bcbbd..ebea548d88c91 100644 --- a/frame/asset-rate/Cargo.toml +++ b/frame/asset-rate/Cargo.toml @@ -13,20 +13,21 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, optional = true, path = "../../primitives/core" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } [features] default = ["std"] @@ -38,15 +39,21 @@ std = [ "scale-info/std", "sp-runtime/std", "sp-std/std", + "sp-core?/std", + 
"pallet-balances/std", + "sp-io/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "sp-core", + "pallet-balances/runtime-benchmarks" ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "sp-runtime/try-runtime", + "pallet-balances/try-runtime" ] diff --git a/frame/asset-rate/src/benchmarking.rs b/frame/asset-rate/src/benchmarking.rs index dde0d764affb2..0e13697806043 100644 --- a/frame/asset-rate/src/benchmarking.rs +++ b/frame/asset-rate/src/benchmarking.rs @@ -20,28 +20,43 @@ use super::*; use crate::{pallet as pallet_asset_rate, Pallet as AssetRate}; +use codec::Encode; use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::RawOrigin; +use sp_core::crypto::FromEntropy; -const ASSET_ID: u32 = 1; +/// Trait describing the factory function for the `AssetKind` parameter. +pub trait AssetKindFactory { + fn create_asset_kind(seed: u32) -> AssetKind; +} +impl AssetKindFactory for () +where + AssetKind: FromEntropy, +{ + fn create_asset_kind(seed: u32) -> AssetKind { + AssetKind::from_entropy(&mut seed.encode().as_slice()).unwrap() + } +} + +const SEED: u32 = 1; fn default_conversion_rate() -> FixedU128 { FixedU128::from_u32(1u32) } -#[benchmarks(where ::AssetId: From)] +#[benchmarks] mod benchmarks { use super::*; #[benchmark] fn create() -> Result<(), BenchmarkError> { - let asset_id: T::AssetId = ASSET_ID.into(); + let asset_kind: T::AssetKind = T::BenchmarkHelper::create_asset_kind(SEED); #[extrinsic_call] - _(RawOrigin::Root, asset_id, default_conversion_rate()); + _(RawOrigin::Root, asset_kind.clone(), default_conversion_rate()); assert_eq!( - pallet_asset_rate::ConversionRateToNative::::get(asset_id), + pallet_asset_rate::ConversionRateToNative::::get(asset_kind), Some(default_conversion_rate()) ); Ok(()) @@ -49,18 +64,18 @@ mod benchmarks { #[benchmark] fn update() -> Result<(), 
BenchmarkError> { - let asset_id: T::AssetId = ASSET_ID.into(); + let asset_kind: T::AssetKind = T::BenchmarkHelper::create_asset_kind(SEED); assert_ok!(AssetRate::::create( RawOrigin::Root.into(), - asset_id, + asset_kind.clone(), default_conversion_rate() )); #[extrinsic_call] - _(RawOrigin::Root, asset_id, FixedU128::from_u32(2)); + _(RawOrigin::Root, asset_kind.clone(), FixedU128::from_u32(2)); assert_eq!( - pallet_asset_rate::ConversionRateToNative::::get(asset_id), + pallet_asset_rate::ConversionRateToNative::::get(asset_kind), Some(FixedU128::from_u32(2)) ); Ok(()) @@ -68,17 +83,17 @@ mod benchmarks { #[benchmark] fn remove() -> Result<(), BenchmarkError> { - let asset_id: T::AssetId = ASSET_ID.into(); + let asset_kind: T::AssetKind = T::BenchmarkHelper::create_asset_kind(SEED); assert_ok!(AssetRate::::create( RawOrigin::Root.into(), - ASSET_ID.into(), + asset_kind.clone(), default_conversion_rate() )); #[extrinsic_call] - _(RawOrigin::Root, asset_id); + _(RawOrigin::Root, asset_kind.clone()); - assert!(pallet_asset_rate::ConversionRateToNative::::get(asset_id).is_none()); + assert!(pallet_asset_rate::ConversionRateToNative::::get(asset_kind).is_none()); Ok(()) } diff --git a/frame/asset-rate/src/lib.rs b/frame/asset-rate/src/lib.rs index 8c6597a389833..8b55f3d1d4029 100644 --- a/frame/asset-rate/src/lib.rs +++ b/frame/asset-rate/src/lib.rs @@ -59,27 +59,26 @@ #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::traits::{ - fungible::Inspect, - tokens::{Balance, ConversionFromAssetBalance}, -}; -use sp_runtime::{traits::Zero, FixedPointNumber, FixedPointOperand, FixedU128}; +use frame_support::traits::{fungible::Inspect, tokens::ConversionFromAssetBalance}; +use sp_runtime::{traits::Zero, FixedPointNumber, FixedU128}; pub use pallet::*; pub use weights::WeightInfo; #[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; +mod benchmarking; #[cfg(test)] mod mock; #[cfg(test)] mod tests; pub mod weights; +#[cfg(feature = "runtime-benchmarks")] 
+pub use benchmarking::AssetKindFactory; // Type alias for `frame_system`'s account id. type AccountIdOf = ::AccountId; -// This pallet's asset id and balance type. -type AssetIdOf = ::AssetId; +// This pallet's asset kind and balance type. +type AssetKindOf = ::AssetKind; // Generic fungible balance type. type BalanceOf = <::Currency as Inspect>>::Balance; @@ -109,38 +108,39 @@ pub mod pallet { /// The origin permissioned to update an existiing conversion rate for an asset. type UpdateOrigin: EnsureOrigin; - /// The units in which we record balances. - type Balance: Balance + FixedPointOperand; - /// The currency mechanism for this pallet. - type Currency: Inspect; + type Currency: Inspect; + + /// The type for asset kinds for which the conversion rate to native balance is set. + type AssetKind: Parameter + MaxEncodedLen; - /// The identifier for the class of asset. - type AssetId: frame_support::traits::tokens::AssetId; + /// Helper type for benchmarks. + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper: crate::AssetKindFactory; } /// Maps an asset to its fixed point representation in the native balance. /// - /// E.g. `native_amount = asset_amount * ConversionRateToNative::::get(asset_id)` + /// E.g. `native_amount = asset_amount * ConversionRateToNative::::get(asset_kind)` #[pallet::storage] pub type ConversionRateToNative = - StorageMap<_, Blake2_128Concat, T::AssetId, FixedU128, OptionQuery>; + StorageMap<_, Blake2_128Concat, T::AssetKind, FixedU128, OptionQuery>; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - // Some `asset_id` conversion rate was created. - AssetRateCreated { asset_id: T::AssetId, rate: FixedU128 }, - // Some `asset_id` conversion rate was removed. - AssetRateRemoved { asset_id: T::AssetId }, - // Some existing `asset_id` conversion rate was updated from `old` to `new`. 
- AssetRateUpdated { asset_id: T::AssetId, old: FixedU128, new: FixedU128 }, + // Some `asset_kind` conversion rate was created. + AssetRateCreated { asset_kind: T::AssetKind, rate: FixedU128 }, + // Some `asset_kind` conversion rate was removed. + AssetRateRemoved { asset_kind: T::AssetKind }, + // Some existing `asset_kind` conversion rate was updated from `old` to `new`. + AssetRateUpdated { asset_kind: T::AssetKind, old: FixedU128, new: FixedU128 }, } #[pallet::error] pub enum Error { /// The given asset ID is unknown. - UnknownAssetId, + UnknownAssetKind, /// The given asset ID already has an assigned conversion rate and cannot be re-created. AlreadyExists, } @@ -155,18 +155,18 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::create())] pub fn create( origin: OriginFor, - asset_id: T::AssetId, + asset_kind: T::AssetKind, rate: FixedU128, ) -> DispatchResult { T::CreateOrigin::ensure_origin(origin)?; ensure!( - !ConversionRateToNative::::contains_key(asset_id), + !ConversionRateToNative::::contains_key(asset_kind.clone()), Error::::AlreadyExists ); - ConversionRateToNative::::set(asset_id, Some(rate)); + ConversionRateToNative::::set(asset_kind.clone(), Some(rate)); - Self::deposit_event(Event::AssetRateCreated { asset_id, rate }); + Self::deposit_event(Event::AssetRateCreated { asset_kind, rate }); Ok(()) } @@ -178,24 +178,24 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::update())] pub fn update( origin: OriginFor, - asset_id: T::AssetId, + asset_kind: T::AssetKind, rate: FixedU128, ) -> DispatchResult { T::UpdateOrigin::ensure_origin(origin)?; let mut old = FixedU128::zero(); - ConversionRateToNative::::mutate(asset_id, |maybe_rate| { + ConversionRateToNative::::mutate(asset_kind.clone(), |maybe_rate| { if let Some(r) = maybe_rate { old = *r; *r = rate; Ok(()) } else { - Err(Error::::UnknownAssetId) + Err(Error::::UnknownAssetKind) } })?; - Self::deposit_event(Event::AssetRateUpdated { asset_id, old, new: rate }); + 
Self::deposit_event(Event::AssetRateUpdated { asset_kind, old, new: rate }); Ok(()) } @@ -205,35 +205,34 @@ pub mod pallet { /// - O(1) #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::remove())] - pub fn remove(origin: OriginFor, asset_id: T::AssetId) -> DispatchResult { + pub fn remove(origin: OriginFor, asset_kind: T::AssetKind) -> DispatchResult { T::RemoveOrigin::ensure_origin(origin)?; ensure!( - ConversionRateToNative::::contains_key(asset_id), - Error::::UnknownAssetId + ConversionRateToNative::::contains_key(asset_kind.clone()), + Error::::UnknownAssetKind ); - ConversionRateToNative::::remove(asset_id); + ConversionRateToNative::::remove(asset_kind.clone()); - Self::deposit_event(Event::AssetRateRemoved { asset_id }); + Self::deposit_event(Event::AssetRateRemoved { asset_kind }); Ok(()) } } } /// Exposes conversion of an arbitrary balance of an asset to native balance. -impl ConversionFromAssetBalance, AssetIdOf, BalanceOf> for Pallet +impl ConversionFromAssetBalance, AssetKindOf, BalanceOf> for Pallet where T: Config, - BalanceOf: FixedPointOperand + Zero, { type Error = pallet::Error; fn from_asset_balance( balance: BalanceOf, - asset_id: AssetIdOf, + asset_kind: AssetKindOf, ) -> Result, pallet::Error> { - let rate = pallet::ConversionRateToNative::::get(asset_id) - .ok_or(pallet::Error::::UnknownAssetId.into())?; + let rate = pallet::ConversionRateToNative::::get(asset_kind) + .ok_or(pallet::Error::::UnknownAssetKind.into())?; Ok(rate.saturating_mul_int(balance)) } } diff --git a/frame/asset-rate/src/mock.rs b/frame/asset-rate/src/mock.rs index 9775b7a747926..5fe0d4240af58 100644 --- a/frame/asset-rate/src/mock.rs +++ b/frame/asset-rate/src/mock.rs @@ -21,18 +21,14 @@ use crate as pallet_asset_rate; use frame_support::traits::{ConstU16, ConstU64}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block 
= frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, AssetRate: pallet_asset_rate, @@ -47,13 +43,12 @@ impl frame_system::Config for Test { type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -77,7 +72,7 @@ impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = (); type ReserveIdentifier = [u8; 8]; - type HoldIdentifier = (); + type RuntimeHoldReason = RuntimeHoldReason; type FreezeIdentifier = (); type MaxHolds = (); type MaxFreezes = (); @@ -89,12 +84,13 @@ impl pallet_asset_rate::Config for Test { type CreateOrigin = frame_system::EnsureRoot; type RemoveOrigin = frame_system::EnsureRoot; type UpdateOrigin = frame_system::EnsureRoot; - type Balance = u64; type Currency = Balances; - type AssetId = u32; + type AssetKind = u32; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); } // Build genesis storage according to the mock runtime. 
pub fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default().build_storage::().unwrap().into() + frame_system::GenesisConfig::::default().build_storage().unwrap().into() } diff --git a/frame/asset-rate/src/tests.rs b/frame/asset-rate/src/tests.rs index 4e5a3167bef21..8990ba9fc28d6 100644 --- a/frame/asset-rate/src/tests.rs +++ b/frame/asset-rate/src/tests.rs @@ -66,7 +66,7 @@ fn remove_unknown_throws() { new_test_ext().execute_with(|| { assert_noop!( AssetRate::remove(RuntimeOrigin::root(), ASSET_ID,), - Error::::UnknownAssetId + Error::::UnknownAssetKind ); }); } @@ -89,7 +89,7 @@ fn update_unknown_throws() { new_test_ext().execute_with(|| { assert_noop!( AssetRate::update(RuntimeOrigin::root(), ASSET_ID, FixedU128::from_float(0.5)), - Error::::UnknownAssetId + Error::::UnknownAssetKind ); }); } @@ -101,7 +101,7 @@ fn convert_works() { let conversion = , - ::AssetId, + ::AssetKind, BalanceOf, >>::from_asset_balance(10, ASSET_ID); assert_eq!(conversion.expect("Conversion rate exists for asset"), 25); @@ -113,7 +113,7 @@ fn convert_unknown_throws() { new_test_ext().execute_with(|| { let conversion = , - ::AssetId, + ::AssetKind, BalanceOf, >>::from_asset_balance(10, ASSET_ID); assert!(conversion.is_err()); diff --git a/frame/asset-rate/src/weights.rs b/frame/asset-rate/src/weights.rs index 4fae62634ef13..582e20e56d7dc 100644 --- a/frame/asset-rate/src/weights.rs +++ b/frame/asset-rate/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_asset_rate //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `local`, CPU: `` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_asset_rate +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_asset_rate. pub trait WeightInfo { @@ -62,8 +66,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3501` - // Minimum execution time: 6_000_000 picoseconds. - Weight::from_parts(7_000_000, 3501) + // Minimum execution time: 11_700_000 picoseconds. + Weight::from_parts(12_158_000, 3501) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -73,8 +77,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `137` // Estimated: `3501` - // Minimum execution time: 7_000_000 picoseconds. - Weight::from_parts(8_000_000, 3501) + // Minimum execution time: 12_119_000 picoseconds. + Weight::from_parts(12_548_000, 3501) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -84,8 +88,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `137` // Estimated: `3501` - // Minimum execution time: 7_000_000 picoseconds. - Weight::from_parts(8_000_000, 3501) + // Minimum execution time: 12_541_000 picoseconds. 
+ Weight::from_parts(12_956_000, 3501) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -99,8 +103,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3501` - // Minimum execution time: 6_000_000 picoseconds. - Weight::from_parts(7_000_000, 3501) + // Minimum execution time: 11_700_000 picoseconds. + Weight::from_parts(12_158_000, 3501) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -110,8 +114,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `137` // Estimated: `3501` - // Minimum execution time: 7_000_000 picoseconds. - Weight::from_parts(8_000_000, 3501) + // Minimum execution time: 12_119_000 picoseconds. + Weight::from_parts(12_548_000, 3501) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -121,8 +125,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `137` // Estimated: `3501` - // Minimum execution time: 7_000_000 picoseconds. - Weight::from_parts(8_000_000, 3501) + // Minimum execution time: 12_541_000 picoseconds. 
+ Weight::from_parts(12_956_000, 3501) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index d3adc47f5f803..4bef7195958f9 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -13,21 +13,21 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } # Needed for various traits. In our case, `OnFinalize`. -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } # Needed for type-safe access to storage DB. frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. 
frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } [dev-dependencies] -sp-std = { version = "5.0.0", path = "../../primitives/std" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-std = { version = "8.0.0", path = "../../primitives/std" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] @@ -41,10 +41,19 @@ std = [ "frame-support/std", "frame-system/std", "frame-benchmarking?/std", + "pallet-balances/std", + "sp-io/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "frame-system/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "pallet-balances/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs index a2483650715a2..376f19139ab9b 100644 --- a/frame/assets/src/benchmarking.rs +++ b/frame/assets/src/benchmarking.rs @@ -141,10 +141,10 @@ benchmarks_instance_pallet! 
{ let asset_id = default_asset_id::(); let origin = T::CreateOrigin::try_successful_origin(&asset_id.into()) .map_err(|_| BenchmarkError::Weightless)?; - let caller = T::CreateOrigin::ensure_origin(origin, &asset_id.into()).unwrap(); + let caller = T::CreateOrigin::ensure_origin(origin.clone(), &asset_id.into()).unwrap(); let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - }: _(SystemOrigin::Signed(caller.clone()), asset_id, caller_lookup, 1u32.into()) + }: _(origin, asset_id, caller_lookup, 1u32.into()) verify { assert_last_event::(Event::Created { asset_id: asset_id.into(), creator: caller.clone(), owner: caller }.into()); } @@ -514,8 +514,8 @@ benchmarks_instance_pallet! { SystemOrigin::Signed(new_account.clone()).into(), asset_id ).is_ok()); - // `touch` should reserve some balance of the caller... - assert!(!T::Currency::reserved_balance(&new_account).is_zero()); + // `touch` should reserve balance of the caller according to the `AssetAccountDeposit` amount... + assert_eq!(T::Currency::reserved_balance(&new_account), T::AssetAccountDeposit::get()); // ...and also create an `Account` entry. assert!(Account::::contains_key(asset_id.into(), &new_account)); }: _(SystemOrigin::Signed(new_account.clone()), asset_id, true) @@ -535,8 +535,8 @@ benchmarks_instance_pallet! { asset_id, new_account_lookup.clone() ).is_ok()); - // `touch_other` should reserve balance of the freezer - assert!(!T::Currency::reserved_balance(&asset_owner).is_zero()); + // `touch` should reserve balance of the caller according to the `AssetAccountDeposit` amount... 
+ assert_eq!(T::Currency::reserved_balance(&asset_owner), T::AssetAccountDeposit::get()); assert!(Account::::contains_key(asset_id.into(), &new_account)); }: _(SystemOrigin::Signed(asset_owner.clone()), asset_id, new_account_lookup.clone()) verify { diff --git a/frame/assets/src/extra_mutator.rs b/frame/assets/src/extra_mutator.rs index 96fb765cb0382..2a44df5f0c661 100644 --- a/frame/assets/src/extra_mutator.rs +++ b/frame/assets/src/extra_mutator.rs @@ -62,7 +62,7 @@ impl, I: 'static> ExtraMutator { id: T::AssetId, who: impl sp_std::borrow::Borrow, ) -> Option> { - if let Some(a) = Account::::get(id, who.borrow()) { + if let Some(a) = Account::::get(&id, who.borrow()) { Some(ExtraMutator:: { id, who: who.borrow().clone(), @@ -77,7 +77,7 @@ impl, I: 'static> ExtraMutator { /// Commit any changes to storage. pub fn commit(&mut self) -> Result<(), ()> { if let Some(extra) = self.pending.take() { - Account::::try_mutate(self.id, self.who.borrow(), |maybe_account| { + Account::::try_mutate(&self.id, &self.who, |maybe_account| { maybe_account.as_mut().ok_or(()).map(|account| account.extra = extra) }) } else { @@ -88,7 +88,7 @@ impl, I: 'static> ExtraMutator { /// Revert any changes, even those already committed by `self` and drop self. pub fn revert(mut self) -> Result<(), ()> { self.pending = None; - Account::::try_mutate(self.id, self.who.borrow(), |maybe_account| { + Account::::try_mutate(&self.id, &self.who, |maybe_account| { maybe_account .as_mut() .ok_or(()) diff --git a/frame/assets/src/functions.rs b/frame/assets/src/functions.rs index d7c5bbe95e9fe..c2c1b6839060e 100644 --- a/frame/assets/src/functions.rs +++ b/frame/assets/src/functions.rs @@ -18,7 +18,7 @@ //! Functions for the Assets pallet. 
use super::*; -use frame_support::{traits::Get, BoundedVec}; +use frame_support::{defensive, traits::Get, BoundedVec}; #[must_use] pub(super) enum DeadConsequence { @@ -128,7 +128,7 @@ impl, I: 'static> Pallet { amount: T::Balance, increase_supply: bool, ) -> DepositConsequence { - let details = match Asset::::get(id) { + let details = match Asset::::get(&id) { Some(details) => details, None => return DepositConsequence::UnknownAsset, }; @@ -165,7 +165,7 @@ impl, I: 'static> Pallet { keep_alive: bool, ) -> WithdrawConsequence { use WithdrawConsequence::*; - let details = match Asset::::get(id) { + let details = match Asset::::get(&id) { Some(details) => details, None => return UnknownAsset, }; @@ -178,7 +178,7 @@ impl, I: 'static> Pallet { if amount.is_zero() { return Success } - let account = match Account::::get(id, who) { + let account = match Account::::get(&id, who) { Some(a) => a, None => return BalanceLow, }; @@ -186,7 +186,7 @@ impl, I: 'static> Pallet { return Frozen } if let Some(rest) = account.balance.checked_sub(&amount) { - if let Some(frozen) = T::Freezer::frozen_balance(id, who) { + if let Some(frozen) = T::Freezer::frozen_balance(id.clone(), who) { match frozen.checked_add(&details.min_balance) { Some(required) if rest < required => return Frozen, None => return Overflow, @@ -194,12 +194,8 @@ impl, I: 'static> Pallet { } } - let is_provider = false; - let is_required = is_provider && !frame_system::Pallet::::can_dec_provider(who); - let must_keep_alive = keep_alive || is_required; - if rest < details.min_balance { - if must_keep_alive { + if keep_alive { WouldDie } else { ReducedToZero(rest) @@ -219,10 +215,10 @@ impl, I: 'static> Pallet { who: &T::AccountId, keep_alive: bool, ) -> Result { - let details = Asset::::get(id).ok_or(Error::::Unknown)?; + let details = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!(details.status == AssetStatus::Live, Error::::AssetNotLive); - let account = Account::::get(id, who).ok_or(Error::::NoAccount)?; + let 
account = Account::::get(&id, who).ok_or(Error::::NoAccount)?; ensure!(!account.status.is_frozen(), Error::::Frozen); let amount = if let Some(frozen) = T::Freezer::frozen_balance(id, who) { @@ -231,9 +227,7 @@ impl, I: 'static> Pallet { frozen.checked_add(&details.min_balance).ok_or(ArithmeticError::Overflow)?; account.balance.saturating_sub(required) } else { - let is_provider = false; - let is_required = is_provider && !frame_system::Pallet::::can_dec_provider(who); - if keep_alive || is_required { + if keep_alive { // We want to keep the account around. account.balance.saturating_sub(details.min_balance) } else { @@ -265,7 +259,7 @@ impl, I: 'static> Pallet { amount: T::Balance, f: DebitFlags, ) -> Result { - let actual = Self::reducible_balance(id, target, f.keep_alive)?.min(amount); + let actual = Self::reducible_balance(id.clone(), target, f.keep_alive)?.min(amount); ensure!(f.best_effort || actual >= amount, Error::::BalanceLow); let conseq = Self::can_decrease(id, target, actual, f.keep_alive); @@ -320,7 +314,7 @@ impl, I: 'static> Pallet { depositor: T::AccountId, check_depositor: bool, ) -> DispatchResult { - ensure!(!Account::::contains_key(id, &who), Error::::AlreadyExists); + ensure!(!Account::::contains_key(&id, &who), Error::::AlreadyExists); let deposit = T::AssetAccountDeposit::get(); let mut details = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!(details.status == AssetStatus::Live, Error::::AssetNotLive); @@ -332,7 +326,7 @@ impl, I: 'static> Pallet { T::Currency::reserve(&depositor, deposit)?; Asset::::insert(&id, details); Account::::insert( - id, + &id, &who, AssetAccountOf:: { balance: Zero::zero(), @@ -350,7 +344,7 @@ impl, I: 'static> Pallet { pub(super) fn do_refund(id: T::AssetId, who: T::AccountId, allow_burn: bool) -> DispatchResult { use AssetStatus::*; use ExistenceReason::*; - let mut account = Account::::get(id, &who).ok_or(Error::::NoDeposit)?; + let mut account = Account::::get(&id, &who).ok_or(Error::::NoDeposit)?; 
ensure!(matches!(account.reason, Consumer | DepositHeld(..)), Error::::NoDeposit); let mut details = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!(matches!(details.status, Live | Frozen), Error::::IncorrectStatus); @@ -361,7 +355,7 @@ impl, I: 'static> Pallet { } if let Remove = Self::dead_account(&who, &mut details, &account.reason, false) { - Account::::remove(id, &who); + Account::::remove(&id, &who); } else { debug_assert!(false, "refund did not result in dead account?!"); // deposit may have been refunded, need to update `Account` @@ -380,7 +374,7 @@ impl, I: 'static> Pallet { who: &T::AccountId, caller: &T::AccountId, ) -> DispatchResult { - let mut account = Account::::get(id, &who).ok_or(Error::::NoDeposit)?; + let mut account = Account::::get(&id, &who).ok_or(Error::::NoDeposit)?; let (depositor, deposit) = account.reason.take_deposit_from().ok_or(Error::::NoDeposit)?; let mut details = Asset::::get(&id).ok_or(Error::::Unknown)?; @@ -392,11 +386,11 @@ impl, I: 'static> Pallet { T::Currency::unreserve(&depositor, deposit); if let Remove = Self::dead_account(&who, &mut details, &account.reason, false) { - Account::::remove(id, &who); + Account::::remove(&id, &who); } else { debug_assert!(false, "refund did not result in dead account?!"); // deposit may have been refunded, need to update `Account` - Account::::insert(id, &who, account); + Account::::insert(&id, &who, account); return Ok(()) } Asset::::insert(&id, details); @@ -416,7 +410,7 @@ impl, I: 'static> Pallet { amount: T::Balance, maybe_check_issuer: Option, ) -> DispatchResult { - Self::increase_balance(id, beneficiary, amount, |details| -> DispatchResult { + Self::increase_balance(id.clone(), beneficiary, amount, |details| -> DispatchResult { if let Some(check_issuer) = maybe_check_issuer { ensure!(check_issuer == details.issuer, Error::::NoPermission); } @@ -450,19 +444,20 @@ impl, I: 'static> Pallet { return Ok(()) } - Self::can_increase(id, beneficiary, amount, true).into_result()?; - 
Asset::::try_mutate(id, |maybe_details| -> DispatchResult { + Self::can_increase(id.clone(), beneficiary, amount, true).into_result()?; + Asset::::try_mutate(&id, |maybe_details| -> DispatchResult { let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; ensure!(details.status == AssetStatus::Live, Error::::AssetNotLive); check(details)?; - Account::::try_mutate(id, beneficiary, |maybe_account| -> DispatchResult { + Account::::try_mutate(&id, beneficiary, |maybe_account| -> DispatchResult { match maybe_account { Some(ref mut account) => { account.balance.saturating_accrue(amount); }, maybe_account @ None => { - // Note this should never fail as it's already checked by `can_increase`. + // Note this should never fail as it's already checked by + // `can_increase`. ensure!(amount >= details.min_balance, TokenError::BelowMinimum); *maybe_account = Some(AssetAccountOf:: { balance: amount, @@ -493,13 +488,13 @@ impl, I: 'static> Pallet { maybe_check_admin: Option, f: DebitFlags, ) -> Result { - let d = Asset::::get(id).ok_or(Error::::Unknown)?; + let d = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!( d.status == AssetStatus::Live || d.status == AssetStatus::Frozen, Error::::AssetNotLive ); - let actual = Self::decrease_balance(id, target, amount, f, |actual, details| { + let actual = Self::decrease_balance(id.clone(), target, amount, f, |actual, details| { // Check admin rights. 
if let Some(check_admin) = maybe_check_admin { ensure!(check_admin == details.admin, Error::::NoPermission); @@ -536,17 +531,17 @@ impl, I: 'static> Pallet { return Ok(amount) } - let details = Asset::::get(id).ok_or(Error::::Unknown)?; + let details = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!(details.status == AssetStatus::Live, Error::::AssetNotLive); - let actual = Self::prep_debit(id, target, amount, f)?; + let actual = Self::prep_debit(id.clone(), target, amount, f)?; let mut target_died: Option = None; - Asset::::try_mutate(id, |maybe_details| -> DispatchResult { + Asset::::try_mutate(&id, |maybe_details| -> DispatchResult { let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; check(actual, details)?; - Account::::try_mutate(id, target, |maybe_account| -> DispatchResult { + Account::::try_mutate(&id, target, |maybe_account| -> DispatchResult { let mut account = maybe_account.take().ok_or(Error::::NoAccount)?; debug_assert!(account.balance >= actual, "checked in prep; qed"); @@ -590,7 +585,7 @@ impl, I: 'static> Pallet { f: TransferFlags, ) -> Result { let (balance, died) = - Self::transfer_and_die(id, source, dest, amount, maybe_need_admin, f)?; + Self::transfer_and_die(id.clone(), source, dest, amount, maybe_need_admin, f)?; if let Some(Remove) = died { T::Freezer::died(id, source); } @@ -611,18 +606,18 @@ impl, I: 'static> Pallet { if amount.is_zero() { return Ok((amount, None)) } - let details = Asset::::get(id).ok_or(Error::::Unknown)?; + let details = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!(details.status == AssetStatus::Live, Error::::AssetNotLive); // Figure out the debit and credit, together with side-effects. 
- let debit = Self::prep_debit(id, source, amount, f.into())?; - let (credit, maybe_burn) = Self::prep_credit(id, dest, amount, debit, f.burn_dust)?; + let debit = Self::prep_debit(id.clone(), source, amount, f.into())?; + let (credit, maybe_burn) = Self::prep_credit(id.clone(), dest, amount, debit, f.burn_dust)?; let mut source_account = - Account::::get(id, &source).ok_or(Error::::NoAccount)?; + Account::::get(&id, &source).ok_or(Error::::NoAccount)?; let mut source_died: Option = None; - Asset::::try_mutate(id, |maybe_details| -> DispatchResult { + Asset::::try_mutate(&id, |maybe_details| -> DispatchResult { let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; // Check admin rights. @@ -647,7 +642,7 @@ impl, I: 'static> Pallet { debug_assert!(source_account.balance >= debit, "checked in prep; qed"); source_account.balance = source_account.balance.saturating_sub(debit); - Account::::try_mutate(id, &dest, |maybe_account| -> DispatchResult { + Account::::try_mutate(&id, &dest, |maybe_account| -> DispatchResult { match maybe_account { Some(ref mut account) => { // Calculate new balance; this will not saturate since it's already checked @@ -676,11 +671,11 @@ impl, I: 'static> Pallet { source_died = Some(Self::dead_account(source, details, &source_account.reason, false)); if let Some(Remove) = source_died { - Account::::remove(id, &source); + Account::::remove(&id, &source); return Ok(()) } } - Account::::insert(id, &source, &source_account); + Account::::insert(&id, &source, &source_account); Ok(()) })?; @@ -707,11 +702,11 @@ impl, I: 'static> Pallet { is_sufficient: bool, min_balance: T::Balance, ) -> DispatchResult { - ensure!(!Asset::::contains_key(id), Error::::InUse); + ensure!(!Asset::::contains_key(&id), Error::::InUse); ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); Asset::::insert( - id, + &id, AssetDetails { owner: owner.clone(), issuer: owner.clone(), @@ -738,8 +733,8 @@ impl, I: 'static> Pallet { id: T::AssetId, maybe_check_owner: 
Option, ) -> DispatchResult { - Asset::::try_mutate_exists(id, |maybe_details| -> Result<(), DispatchError> { - let mut details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + Asset::::try_mutate_exists(id.clone(), |maybe_details| -> Result<(), DispatchError> { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; if let Some(check_owner) = maybe_check_owner { ensure!(details.owner == check_owner, Error::::NoPermission); } @@ -761,15 +756,26 @@ impl, I: 'static> Pallet { let mut dead_accounts: Vec = vec![]; let mut remaining_accounts = 0; let _ = - Asset::::try_mutate_exists(id, |maybe_details| -> Result<(), DispatchError> { + Asset::::try_mutate_exists(&id, |maybe_details| -> Result<(), DispatchError> { let mut details = maybe_details.as_mut().ok_or(Error::::Unknown)?; // Should only destroy accounts while the asset is in a destroying state ensure!(details.status == AssetStatus::Destroying, Error::::IncorrectStatus); - - for (who, v) in Account::::drain_prefix(id) { - let _ = Self::dead_account(&who, &mut details, &v.reason, true); - dead_accounts.push(who); - if dead_accounts.len() >= (max_items as usize) { + for (i, (who, mut v)) in Account::::iter_prefix(&id).enumerate() { + // unreserve the existence deposit if any + if let Some((depositor, deposit)) = v.reason.take_deposit_from() { + T::Currency::unreserve(&depositor, deposit); + } else if let Some(deposit) = v.reason.take_deposit() { + T::Currency::unreserve(&who, deposit); + } + if let Remove = Self::dead_account(&who, &mut details, &v.reason, false) { + Account::::remove(&id, &who); + dead_accounts.push(who); + } else { + // deposit may have been released, need to update `Account` + Account::::insert(&id, &who, v); + defensive!("destroy did not result in dead account?!"); + } + if i + 1 >= (max_items as usize) { break } } @@ -778,7 +784,7 @@ impl, I: 'static> Pallet { })?; for who in &dead_accounts { - T::Freezer::died(id, &who); + T::Freezer::died(id.clone(), &who); } 
Self::deposit_event(Event::AccountsDestroyed { @@ -798,14 +804,15 @@ impl, I: 'static> Pallet { max_items: u32, ) -> Result { let mut removed_approvals = 0; - let _ = - Asset::::try_mutate_exists(id, |maybe_details| -> Result<(), DispatchError> { - let mut details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + let _ = Asset::::try_mutate_exists( + id.clone(), + |maybe_details| -> Result<(), DispatchError> { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; // Should only destroy accounts while the asset is in a destroying state. ensure!(details.status == AssetStatus::Destroying, Error::::IncorrectStatus); - for ((owner, _), approval) in Approvals::::drain_prefix((id,)) { + for ((owner, _), approval) in Approvals::::drain_prefix((id.clone(),)) { T::Currency::unreserve(&owner, approval.deposit); removed_approvals = removed_approvals.saturating_add(1); details.approvals = details.approvals.saturating_sub(1); @@ -819,7 +826,8 @@ impl, I: 'static> Pallet { approvals_remaining: details.approvals as u32, }); Ok(()) - })?; + }, + )?; Ok(removed_approvals) } @@ -827,7 +835,7 @@ impl, I: 'static> Pallet { /// /// On success, the `Event::Destroyed` event is emitted. 
pub(super) fn do_finish_destroy(id: T::AssetId) -> DispatchResult { - Asset::::try_mutate_exists(id, |maybe_details| -> Result<(), DispatchError> { + Asset::::try_mutate_exists(id.clone(), |maybe_details| -> Result<(), DispatchError> { let details = maybe_details.take().ok_or(Error::::Unknown)?; ensure!(details.status == AssetStatus::Destroying, Error::::IncorrectStatus); ensure!(details.accounts == 0, Error::::InUse); @@ -855,10 +863,10 @@ impl, I: 'static> Pallet { delegate: &T::AccountId, amount: T::Balance, ) -> DispatchResult { - let mut d = Asset::::get(id).ok_or(Error::::Unknown)?; + let mut d = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!(d.status == AssetStatus::Live, Error::::AssetNotLive); Approvals::::try_mutate( - (id, &owner, &delegate), + (id.clone(), &owner, &delegate), |maybe_approved| -> DispatchResult { let mut approved = match maybe_approved.take() { // an approval already exists and is being updated @@ -879,7 +887,7 @@ impl, I: 'static> Pallet { Ok(()) }, )?; - Asset::::insert(id, d); + Asset::::insert(&id, d); Self::deposit_event(Event::ApprovedTransfer { asset_id: id, source: owner.clone(), @@ -906,22 +914,23 @@ impl, I: 'static> Pallet { ) -> DispatchResult { let mut owner_died: Option = None; - let d = Asset::::get(id).ok_or(Error::::Unknown)?; + let d = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!(d.status == AssetStatus::Live, Error::::AssetNotLive); Approvals::::try_mutate_exists( - (id, &owner, delegate), + (id.clone(), &owner, delegate), |maybe_approved| -> DispatchResult { let mut approved = maybe_approved.take().ok_or(Error::::Unapproved)?; let remaining = approved.amount.checked_sub(&amount).ok_or(Error::::Unapproved)?; let f = TransferFlags { keep_alive: false, best_effort: false, burn_dust: false }; - owner_died = Self::transfer_and_die(id, owner, destination, amount, None, f)?.1; + owner_died = + Self::transfer_and_die(id.clone(), owner, destination, amount, None, f)?.1; if remaining.is_zero() { 
T::Currency::unreserve(owner, approved.deposit); - Asset::::mutate(id, |maybe_details| { + Asset::::mutate(id.clone(), |maybe_details| { if let Some(details) = maybe_details { details.approvals.saturating_dec(); } @@ -954,17 +963,15 @@ impl, I: 'static> Pallet { let bounded_symbol: BoundedVec = symbol.clone().try_into().map_err(|_| Error::::BadMetadata)?; - let d = Asset::::get(id).ok_or(Error::::Unknown)?; + let d = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!(d.status == AssetStatus::Live, Error::::AssetNotLive); ensure!(from == &d.owner, Error::::NoPermission); - Metadata::::try_mutate_exists(id, |metadata| { + Metadata::::try_mutate_exists(id.clone(), |metadata| { ensure!(metadata.as_ref().map_or(true, |m| !m.is_frozen), Error::::NoPermission); let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); - let new_deposit = T::MetadataDepositPerByte::get() - .saturating_mul(((name.len() + symbol.len()) as u32).into()) - .saturating_add(T::MetadataDepositBase::get()); + let new_deposit = Self::calc_metadata_deposit(&name, &symbol); if new_deposit > old_deposit { T::Currency::reserve(from, new_deposit - old_deposit)?; @@ -991,10 +998,19 @@ impl, I: 'static> Pallet { }) } + /// Calculate the metadata deposit for the provided data. + pub(super) fn calc_metadata_deposit(name: &[u8], symbol: &[u8]) -> DepositBalanceOf { + T::MetadataDepositPerByte::get() + .saturating_mul(((name.len() + symbol.len()) as u32).into()) + .saturating_add(T::MetadataDepositBase::get()) + } + /// Returns all the non-zero balances for all assets of the given `account`. 
pub fn account_balances(account: T::AccountId) -> Vec<(T::AssetId, T::Balance)> { Asset::::iter_keys() - .filter_map(|id| Self::maybe_balance(id, account.clone()).map(|balance| (id, balance))) + .filter_map(|id| { + Self::maybe_balance(id.clone(), account.clone()).map(|balance| (id, balance)) + }) .collect::>() } } diff --git a/frame/assets/src/impl_fungibles.rs b/frame/assets/src/impl_fungibles.rs index 893d74b6aa306..123abeba8283f 100644 --- a/frame/assets/src/impl_fungibles.rs +++ b/frame/assets/src/impl_fungibles.rs @@ -226,6 +226,19 @@ impl, I: 'static> fungibles::metadata::Mutate<:: } } +impl, I: 'static> + fungibles::metadata::MetadataDeposit< + ::AccountId>>::Balance, + > for Pallet +{ + fn calc_metadata_deposit( + name: &[u8], + symbol: &[u8], + ) -> ::AccountId>>::Balance { + Self::calc_metadata_deposit(&name, &symbol) + } +} + impl, I: 'static> fungibles::approvals::Inspect<::AccountId> for Pallet { @@ -244,6 +257,7 @@ impl, I: 'static> fungibles::approvals::Inspect< impl, I: 'static> fungibles::approvals::Mutate<::AccountId> for Pallet { + // Approve spending tokens from a given account fn approve( asset: T::AssetId, owner: &::AccountId, @@ -253,7 +267,6 @@ impl, I: 'static> fungibles::approvals::Mutate<: Self::do_approve_transfer(asset, owner, delegate, amount) } - // Aprove spending tokens from a given account fn transfer_from( asset: T::AssetId, owner: &::AccountId, diff --git a/frame/assets/src/impl_stored_map.rs b/frame/assets/src/impl_stored_map.rs index 5ead708469a94..a7a5a0859f701 100644 --- a/frame/assets/src/impl_stored_map.rs +++ b/frame/assets/src/impl_stored_map.rs @@ -21,7 +21,7 @@ use super::*; impl, I: 'static> StoredMap<(T::AssetId, T::AccountId), T::Extra> for Pallet { fn get(id_who: &(T::AssetId, T::AccountId)) -> T::Extra { - let &(id, ref who) = id_who; + let (id, who) = id_who; Account::::get(id, who).map(|a| a.extra).unwrap_or_default() } @@ -29,7 +29,7 @@ impl, I: 'static> StoredMap<(T::AssetId, T::AccountId), T::Extra> f id_who: 
&(T::AssetId, T::AccountId), f: impl FnOnce(&mut Option) -> Result, ) -> Result { - let &(id, ref who) = id_who; + let (id, who) = id_who; let mut maybe_extra = Account::::get(id, who).map(|a| a.extra); let r = f(&mut maybe_extra)?; // They want to write some value or delete it. diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index e9259f4b670ad..363a99701b56a 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -162,7 +162,7 @@ use sp_runtime::{ traits::{AtLeast32BitUnsigned, CheckedAdd, CheckedSub, Saturating, StaticLookup, Zero}, ArithmeticError, TokenError, }; -use sp_std::{borrow::Borrow, prelude::*}; +use sp_std::prelude::*; use frame_support::{ dispatch::{DispatchError, DispatchResult}, @@ -250,7 +250,7 @@ pub mod pallet { type RemoveItemsLimit: Get; /// Identifier for the class of asset. - type AssetId: Member + Parameter + Copy + MaybeSerializeDeserialize + MaxEncodedLen; + type AssetId: Member + Parameter + Clone + MaybeSerializeDeserialize + MaxEncodedLen; /// Wrapper around `Self::AssetId` to use in dispatchable call signatures. 
Allows the use /// of compact encoding in instances of the pallet, which will prevent breaking changes @@ -380,7 +380,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { + impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) { for (id, owner, is_sufficient, min_balance) in &self.assets { assert!(!Asset::::contains_key(id), "Asset id already in use"); @@ -424,7 +424,7 @@ pub mod pallet { for (id, account_id, amount) in &self.accounts { let result = >::increase_balance( - *id, + id.clone(), account_id, *amount, |details| -> DispatchResult { @@ -605,14 +605,14 @@ pub mod pallet { let owner = T::CreateOrigin::ensure_origin(origin, &id)?; let admin = T::Lookup::lookup(admin)?; - ensure!(!Asset::::contains_key(id), Error::::InUse); + ensure!(!Asset::::contains_key(&id), Error::::InUse); ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); let deposit = T::AssetDeposit::get(); T::Currency::reserve(&owner, deposit)?; Asset::::insert( - id, + id.clone(), AssetDetails { owner: owner.clone(), issuer: admin.clone(), @@ -937,7 +937,7 @@ pub mod pallet { let origin = ensure_signed(origin)?; let id: T::AssetId = id.into(); - let d = Asset::::get(id).ok_or(Error::::Unknown)?; + let d = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!( d.status == AssetStatus::Live || d.status == AssetStatus::Frozen, Error::::AssetNotLive @@ -945,7 +945,7 @@ pub mod pallet { ensure!(origin == d.freezer, Error::::NoPermission); let who = T::Lookup::lookup(who)?; - Account::::try_mutate(id, &who, |maybe_account| -> DispatchResult { + Account::::try_mutate(&id, &who, |maybe_account| -> DispatchResult { maybe_account.as_mut().ok_or(Error::::NoAccount)?.status = AccountStatus::Frozen; Ok(()) @@ -974,7 +974,7 @@ pub mod pallet { let origin = ensure_signed(origin)?; let id: T::AssetId = id.into(); - let details = Asset::::get(id).ok_or(Error::::Unknown)?; + let details = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!( 
details.status == AssetStatus::Live || details.status == AssetStatus::Frozen, Error::::AssetNotLive @@ -982,7 +982,7 @@ pub mod pallet { ensure!(origin == details.admin, Error::::NoPermission); let who = T::Lookup::lookup(who)?; - Account::::try_mutate(id, &who, |maybe_account| -> DispatchResult { + Account::::try_mutate(&id, &who, |maybe_account| -> DispatchResult { maybe_account.as_mut().ok_or(Error::::NoAccount)?.status = AccountStatus::Liquid; Ok(()) @@ -1006,7 +1006,7 @@ pub mod pallet { let origin = ensure_signed(origin)?; let id: T::AssetId = id.into(); - Asset::::try_mutate(id, |maybe_details| { + Asset::::try_mutate(id.clone(), |maybe_details| { let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; ensure!(d.status == AssetStatus::Live, Error::::AssetNotLive); ensure!(origin == d.freezer, Error::::NoPermission); @@ -1032,7 +1032,7 @@ pub mod pallet { let origin = ensure_signed(origin)?; let id: T::AssetId = id.into(); - Asset::::try_mutate(id, |maybe_details| { + Asset::::try_mutate(id.clone(), |maybe_details| { let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; ensure!(origin == d.admin, Error::::NoPermission); ensure!(d.status == AssetStatus::Frozen, Error::::NotFrozen); @@ -1064,7 +1064,7 @@ pub mod pallet { let owner = T::Lookup::lookup(owner)?; let id: T::AssetId = id.into(); - Asset::::try_mutate(id, |maybe_details| { + Asset::::try_mutate(id.clone(), |maybe_details| { let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; ensure!(details.status == AssetStatus::Live, Error::::LiveAsset); ensure!(origin == details.owner, Error::::NoPermission); @@ -1072,7 +1072,7 @@ pub mod pallet { return Ok(()) } - let metadata_deposit = Metadata::::get(id).deposit; + let metadata_deposit = Metadata::::get(&id).deposit; let deposit = details.deposit + metadata_deposit; // Move the deposit to the new owner. 
@@ -1111,7 +1111,7 @@ pub mod pallet { let freezer = T::Lookup::lookup(freezer)?; let id: T::AssetId = id.into(); - Asset::::try_mutate(id, |maybe_details| { + Asset::::try_mutate(id.clone(), |maybe_details| { let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; ensure!(details.status == AssetStatus::Live, Error::::AssetNotLive); ensure!(origin == details.owner, Error::::NoPermission); @@ -1171,11 +1171,11 @@ pub mod pallet { let origin = ensure_signed(origin)?; let id: T::AssetId = id.into(); - let d = Asset::::get(id).ok_or(Error::::Unknown)?; + let d = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!(d.status == AssetStatus::Live, Error::::AssetNotLive); ensure!(origin == d.owner, Error::::NoPermission); - Metadata::::try_mutate_exists(id, |metadata| { + Metadata::::try_mutate_exists(id.clone(), |metadata| { let deposit = metadata.take().ok_or(Error::::Unknown)?.deposit; T::Currency::unreserve(&d.owner, deposit); Self::deposit_event(Event::MetadataCleared { asset_id: id }); @@ -1216,8 +1216,8 @@ pub mod pallet { let bounded_symbol: BoundedVec = symbol.clone().try_into().map_err(|_| Error::::BadMetadata)?; - ensure!(Asset::::contains_key(id), Error::::Unknown); - Metadata::::try_mutate_exists(id, |metadata| { + ensure!(Asset::::contains_key(&id), Error::::Unknown); + Metadata::::try_mutate_exists(id.clone(), |metadata| { let deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); *metadata = Some(AssetMetadata { deposit, @@ -1257,8 +1257,8 @@ pub mod pallet { T::ForceOrigin::ensure_origin(origin)?; let id: T::AssetId = id.into(); - let d = Asset::::get(id).ok_or(Error::::Unknown)?; - Metadata::::try_mutate_exists(id, |metadata| { + let d = Asset::::get(&id).ok_or(Error::::Unknown)?; + Metadata::::try_mutate_exists(id.clone(), |metadata| { let deposit = metadata.take().ok_or(Error::::Unknown)?.deposit; T::Currency::unreserve(&d.owner, deposit); Self::deposit_event(Event::MetadataCleared { asset_id: id }); @@ -1303,7 +1303,7 @@ pub mod pallet { 
T::ForceOrigin::ensure_origin(origin)?; let id: T::AssetId = id.into(); - Asset::::try_mutate(id, |maybe_asset| { + Asset::::try_mutate(id.clone(), |maybe_asset| { let mut asset = maybe_asset.take().ok_or(Error::::Unknown)?; ensure!(asset.status != AssetStatus::Destroying, Error::::AssetNotLive); asset.owner = T::Lookup::lookup(owner)?; @@ -1379,15 +1379,15 @@ pub mod pallet { let owner = ensure_signed(origin)?; let delegate = T::Lookup::lookup(delegate)?; let id: T::AssetId = id.into(); - let mut d = Asset::::get(id).ok_or(Error::::Unknown)?; + let mut d = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!(d.status == AssetStatus::Live, Error::::AssetNotLive); - let approval = - Approvals::::take((id, &owner, &delegate)).ok_or(Error::::Unknown)?; + let approval = Approvals::::take((id.clone(), &owner, &delegate)) + .ok_or(Error::::Unknown)?; T::Currency::unreserve(&owner, approval.deposit); d.approvals.saturating_dec(); - Asset::::insert(id, d); + Asset::::insert(id.clone(), d); Self::deposit_event(Event::ApprovalCancelled { asset_id: id, owner, delegate }); Ok(()) @@ -1414,7 +1414,7 @@ pub mod pallet { delegate: AccountIdLookupOf, ) -> DispatchResult { let id: T::AssetId = id.into(); - let mut d = Asset::::get(id).ok_or(Error::::Unknown)?; + let mut d = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!(d.status == AssetStatus::Live, Error::::AssetNotLive); T::ForceOrigin::try_origin(origin) .map(|_| ()) @@ -1427,11 +1427,11 @@ pub mod pallet { let owner = T::Lookup::lookup(owner)?; let delegate = T::Lookup::lookup(delegate)?; - let approval = - Approvals::::take((id, &owner, &delegate)).ok_or(Error::::Unknown)?; + let approval = Approvals::::take((id.clone(), &owner, &delegate)) + .ok_or(Error::::Unknown)?; T::Currency::unreserve(&owner, approval.deposit); d.approvals.saturating_dec(); - Asset::::insert(id, d); + Asset::::insert(id.clone(), d); Self::deposit_event(Event::ApprovalCancelled { asset_id: id, owner, delegate }); Ok(()) @@ -1529,7 +1529,7 @@ pub mod 
pallet { let origin = ensure_signed(origin)?; let id: T::AssetId = id.into(); - let mut details = Asset::::get(id).ok_or(Error::::Unknown)?; + let mut details = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!(origin == details.owner, Error::::NoPermission); let old_min_balance = details.min_balance; @@ -1619,7 +1619,7 @@ pub mod pallet { let origin = ensure_signed(origin)?; let id: T::AssetId = id.into(); - let d = Asset::::get(id).ok_or(Error::::Unknown)?; + let d = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!( d.status == AssetStatus::Live || d.status == AssetStatus::Frozen, Error::::AssetNotLive @@ -1627,7 +1627,7 @@ pub mod pallet { ensure!(origin == d.freezer, Error::::NoPermission); let who = T::Lookup::lookup(who)?; - Account::::try_mutate(id, &who, |maybe_account| -> DispatchResult { + Account::::try_mutate(&id, &who, |maybe_account| -> DispatchResult { maybe_account.as_mut().ok_or(Error::::NoAccount)?.status = AccountStatus::Blocked; Ok(()) diff --git a/frame/assets/src/migration.rs b/frame/assets/src/migration.rs index 90400ef5bd1dc..d854a64afb57f 100644 --- a/frame/assets/src/migration.rs +++ b/frame/assets/src/migration.rs @@ -18,6 +18,9 @@ use super::*; use frame_support::{log, traits::OnRuntimeUpgrade}; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + pub mod v1 { use frame_support::{pallet_prelude::*, weights::Weight}; @@ -92,7 +95,7 @@ pub mod v1 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { frame_support::ensure!( Pallet::::on_chain_storage_version() == 0, "must upgrade linearly" @@ -102,13 +105,13 @@ pub mod v1 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(prev_count: Vec) -> Result<(), &'static str> { + fn post_upgrade(prev_count: Vec) -> Result<(), TryRuntimeError> { let prev_count: u32 = Decode::decode(&mut prev_count.as_slice()).expect( "the state parameter should be something that was generated by pre_upgrade", ); let 
post_count = Asset::::iter().count() as u32; - assert_eq!( - prev_count, post_count, + ensure!( + prev_count == post_count, "the asset count before and after the migration should be the same" ); @@ -116,17 +119,18 @@ pub mod v1 { let onchain_version = Pallet::::on_chain_storage_version(); frame_support::ensure!(current_version == 1, "must_upgrade"); - assert_eq!( - current_version, onchain_version, + ensure!( + current_version == onchain_version, "after migration, the current_version and onchain_version should be the same" ); - Asset::::iter().for_each(|(_id, asset)| { - assert!( + Asset::::iter().try_for_each(|(_id, asset)| -> Result<(), TryRuntimeError> { + ensure!( asset.status == AssetStatus::Live || asset.status == AssetStatus::Frozen, - "assets should only be live or frozen. None should be in destroying status, or undefined state" - ) - }); + "assets should only be live or frozen. None should be in destroying status, or undefined state" + ); + Ok(()) + })?; Ok(()) } } diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 3926d2fa8b010..32ad02da90412 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -23,25 +23,21 @@ use crate as pallet_assets; use codec::Encode; use frame_support::{ construct_runtime, parameter_types, - traits::{AsEnsureOriginWithArg, ConstU32, ConstU64, GenesisBuild}, + traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, }; use sp_core::H256; use sp_io::storage; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, 
Assets: pallet_assets::{Pallet, Call, Storage, Event}, } @@ -56,13 +52,12 @@ impl frame_system::Config for Test { type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); @@ -87,7 +82,7 @@ impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = (); type ReserveIdentifier = [u8; 8]; - type HoldIdentifier = (); + type RuntimeHoldReason = (); type FreezeIdentifier = (); type MaxHolds = (); type MaxFreezes = (); @@ -205,7 +200,7 @@ pub(crate) fn take_hooks() -> Vec { } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { - let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); let config: pallet_assets::GenesisConfig = pallet_assets::GenesisConfig { assets: vec![ diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index 9eb1107aa5209..06d4ec1211737 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -1741,3 +1741,37 @@ fn weights_sane() { let info = crate::Call::::finish_destroy { id: 10 }.get_dispatch_info(); assert_eq!(<() as crate::WeightInfo>::finish_destroy(), info.weight); } + +#[test] +fn asset_destroy_refund_existence_deposit() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); + Balances::make_free_balance_be(&1, 100); + let admin = 1; + let admin_origin = RuntimeOrigin::signed(admin); + + let account2 = 2; // account with own deposit + let account3 = 3; // account with admin's deposit + Balances::make_free_balance_be(&account2, 100); + + 
assert_eq!(Balances::reserved_balance(&account2), 0); + assert_eq!(Balances::reserved_balance(&account3), 0); + assert_eq!(Balances::reserved_balance(&admin), 0); + + assert_ok!(Assets::touch(RuntimeOrigin::signed(account2), 0)); + assert_ok!(Assets::touch_other(admin_origin.clone(), 0, account3)); + + assert_eq!(Balances::reserved_balance(&account2), 10); + assert_eq!(Balances::reserved_balance(&account3), 0); + assert_eq!(Balances::reserved_balance(&admin), 10); + + assert_ok!(Assets::start_destroy(admin_origin.clone(), 0)); + assert_ok!(Assets::destroy_accounts(admin_origin.clone(), 0)); + assert_ok!(Assets::destroy_approvals(admin_origin.clone(), 0)); + assert_ok!(Assets::finish_destroy(admin_origin.clone(), 0)); + + assert_eq!(Balances::reserved_balance(&account2), 0); + assert_eq!(Balances::reserved_balance(&account3), 0); + assert_eq!(Balances::reserved_balance(&admin), 0); + }); +} diff --git a/frame/assets/src/types.rs b/frame/assets/src/types.rs index 559afccb946c5..67f9bf07f5e7e 100644 --- a/frame/assets/src/types.rs +++ b/frame/assets/src/types.rs @@ -22,7 +22,7 @@ use frame_support::{ pallet_prelude::*, traits::{fungible, tokens::ConversionToAssetBalance}, }; -use sp_runtime::{traits::Convert, FixedPointNumber, FixedPointOperand, FixedU128}; +use sp_runtime::{traits::Convert, FixedPointNumber, FixedU128}; pub(super) type DepositBalanceOf = <>::Currency as Currency<::AccountId>>::Balance; @@ -293,8 +293,6 @@ where T: Config, I: 'static, CON: Convert, AssetBalanceOf>, - BalanceOf: FixedPointOperand + Zero, - AssetBalanceOf: FixedPointOperand + Zero, { type Error = ConversionError; diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs index 76ac585ff41f8..f20f7e317cff7 100644 --- a/frame/assets/src/weights.rs +++ b/frame/assets/src/weights.rs @@ -18,26 +18,28 @@ //! Autogenerated weights for pallet_assets //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-05-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_assets +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_assets -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/assets/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -95,8 +97,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3675` - // Minimum execution time: 31_668_000 picoseconds. - Weight::from_parts(32_079_000, 3675) + // Minimum execution time: 31_340_000 picoseconds. + Weight::from_parts(31_977_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -106,8 +108,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3675` - // Minimum execution time: 14_885_000 picoseconds. - Weight::from_parts(15_358_000, 3675) + // Minimum execution time: 13_342_000 picoseconds. 
+ Weight::from_parts(13_782_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -117,8 +119,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 15_295_000 picoseconds. - Weight::from_parts(15_639_000, 3675) + // Minimum execution time: 14_437_000 picoseconds. + Weight::from_parts(14_833_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -133,10 +135,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + c * (208 ±0)` // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 19_916_000 picoseconds. - Weight::from_parts(20_220_000, 3675) - // Standard Error: 7_298 - .saturating_add(Weight::from_parts(12_553_976, 0).saturating_mul(c.into())) + // Minimum execution time: 18_728_000 picoseconds. + Weight::from_parts(18_982_000, 3675) + // Standard Error: 11_708 + .saturating_add(Weight::from_parts(14_363_570, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -152,10 +154,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `522 + a * (86 ±0)` // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 20_322_000 picoseconds. - Weight::from_parts(20_744_000, 3675) - // Standard Error: 12_314 - .saturating_add(Weight::from_parts(14_767_353, 0).saturating_mul(a.into())) + // Minimum execution time: 18_611_000 picoseconds. 
+ Weight::from_parts(18_970_000, 3675) + // Standard Error: 13_224 + .saturating_add(Weight::from_parts(16_397_299, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -170,8 +172,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 15_668_000 picoseconds. - Weight::from_parts(16_016_000, 3675) + // Minimum execution time: 14_504_000 picoseconds. + Weight::from_parts(14_906_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -183,8 +185,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 28_227_000 picoseconds. - Weight::from_parts(28_769_000, 3675) + // Minimum execution time: 26_653_000 picoseconds. + Weight::from_parts(27_260_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -196,8 +198,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 34_672_000 picoseconds. - Weight::from_parts(34_902_000, 3675) + // Minimum execution time: 33_625_000 picoseconds. + Weight::from_parts(34_474_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -211,8 +213,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 49_003_000 picoseconds. - Weight::from_parts(49_345_000, 6208) + // Minimum execution time: 47_609_000 picoseconds. 
+ Weight::from_parts(48_476_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -226,8 +228,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 43_429_000 picoseconds. - Weight::from_parts(43_936_000, 6208) + // Minimum execution time: 41_625_000 picoseconds. + Weight::from_parts(43_030_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -241,8 +243,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 49_177_000 picoseconds. - Weight::from_parts(49_548_000, 6208) + // Minimum execution time: 47_661_000 picoseconds. + Weight::from_parts(48_469_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -254,8 +256,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 19_323_000 picoseconds. - Weight::from_parts(19_945_000, 3675) + // Minimum execution time: 17_727_000 picoseconds. + Weight::from_parts(18_384_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -267,8 +269,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 19_543_000 picoseconds. - Weight::from_parts(19_747_000, 3675) + // Minimum execution time: 17_657_000 picoseconds. + Weight::from_parts(18_282_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -278,8 +280,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 15_623_000 picoseconds. 
- Weight::from_parts(15_833_000, 3675) + // Minimum execution time: 13_743_000 picoseconds. + Weight::from_parts(14_193_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -289,8 +291,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 15_396_000 picoseconds. - Weight::from_parts(15_704_000, 3675) + // Minimum execution time: 13_653_000 picoseconds. + Weight::from_parts(14_263_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -302,8 +304,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 17_205_000 picoseconds. - Weight::from_parts(17_546_000, 3675) + // Minimum execution time: 15_328_000 picoseconds. + Weight::from_parts(16_042_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -313,8 +315,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 16_049_000 picoseconds. - Weight::from_parts(16_317_000, 3675) + // Minimum execution time: 14_097_000 picoseconds. + Weight::from_parts(14_641_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -324,16 +326,12 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn set_metadata(n: u32, s: u32, ) -> Weight { + fn set_metadata(_n: u32, _s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 31_574_000 picoseconds. 
- Weight::from_parts(32_447_787, 3675) - // Standard Error: 904 - .saturating_add(Weight::from_parts(653, 0).saturating_mul(n.into())) - // Standard Error: 904 - .saturating_add(Weight::from_parts(271, 0).saturating_mul(s.into())) + // Minimum execution time: 29_535_000 picoseconds. + Weight::from_parts(31_456_892, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -345,8 +343,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 31_865_000 picoseconds. - Weight::from_parts(32_160_000, 3675) + // Minimum execution time: 30_680_000 picoseconds. + Weight::from_parts(31_930_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -356,16 +354,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(n: u32, s: u32, ) -> Weight { + fn force_set_metadata(_n: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `190` // Estimated: `3675` - // Minimum execution time: 16_203_000 picoseconds. - Weight::from_parts(16_432_499, 3675) - // Standard Error: 1_563 - .saturating_add(Weight::from_parts(5_818, 0).saturating_mul(n.into())) - // Standard Error: 1_563 - .saturating_add(Weight::from_parts(9_660, 0).saturating_mul(s.into())) + // Minimum execution time: 14_660_000 picoseconds. 
+ Weight::from_parts(15_718_387, 3675) + // Standard Error: 622 + .saturating_add(Weight::from_parts(2_640, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -377,8 +373,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 33_443_000 picoseconds. - Weight::from_parts(56_533_000, 3675) + // Minimum execution time: 30_853_000 picoseconds. + Weight::from_parts(31_483_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -388,8 +384,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 20_636_000 picoseconds. - Weight::from_parts(23_960_000, 3675) + // Minimum execution time: 13_632_000 picoseconds. + Weight::from_parts(14_077_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -401,8 +397,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 35_987_000 picoseconds. - Weight::from_parts(36_429_000, 3675) + // Minimum execution time: 33_780_000 picoseconds. + Weight::from_parts(34_533_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -418,8 +414,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `668` // Estimated: `6208` - // Minimum execution time: 68_059_000 picoseconds. - Weight::from_parts(69_845_000, 6208) + // Minimum execution time: 67_712_000 picoseconds. 
+ Weight::from_parts(69_946_000, 6208) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -431,8 +427,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 38_066_000 picoseconds. - Weight::from_parts(38_450_000, 3675) + // Minimum execution time: 36_668_000 picoseconds. + Weight::from_parts(37_637_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -444,8 +440,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 38_500_000 picoseconds. - Weight::from_parts(38_953_000, 3675) + // Minimum execution time: 36_685_000 picoseconds. + Weight::from_parts(37_950_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -455,8 +451,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 16_268_000 picoseconds. - Weight::from_parts(16_764_000, 3675) + // Minimum execution time: 14_466_000 picoseconds. + Weight::from_parts(14_924_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -470,8 +466,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `453` // Estimated: `3675` - // Minimum execution time: 37_468_000 picoseconds. - Weight::from_parts(37_957_000, 3675) + // Minimum execution time: 34_874_000 picoseconds. + Weight::from_parts(36_330_000, 3675) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -483,10 +479,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 383_408_000 picoseconds. 
- Weight::from_parts(392_036_000, 3675) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) + // Minimum execution time: 33_278_000 picoseconds. + Weight::from_parts(34_104_000, 3675) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: Assets Account (r:1 w:1) /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) @@ -498,8 +494,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `579` // Estimated: `3675` - // Minimum execution time: 34_066_000 picoseconds. - Weight::from_parts(34_347_000, 3675) + // Minimum execution time: 32_898_000 picoseconds. + Weight::from_parts(33_489_000, 3675) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -511,8 +507,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3675` - // Minimum execution time: 32_060_000 picoseconds. - Weight::from_parts(32_519_000, 3675) + // Minimum execution time: 31_243_000 picoseconds. + Weight::from_parts(31_909_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -524,8 +520,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 115_000_000 picoseconds. - Weight::from_parts(163_000_000, 3675) + // Minimum execution time: 17_692_000 picoseconds. + Weight::from_parts(18_253_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -541,8 +537,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3675` - // Minimum execution time: 31_668_000 picoseconds. - Weight::from_parts(32_079_000, 3675) + // Minimum execution time: 31_340_000 picoseconds. 
+ Weight::from_parts(31_977_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -552,8 +548,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3675` - // Minimum execution time: 14_885_000 picoseconds. - Weight::from_parts(15_358_000, 3675) + // Minimum execution time: 13_342_000 picoseconds. + Weight::from_parts(13_782_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -563,8 +559,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 15_295_000 picoseconds. - Weight::from_parts(15_639_000, 3675) + // Minimum execution time: 14_437_000 picoseconds. + Weight::from_parts(14_833_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -579,10 +575,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + c * (208 ±0)` // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 19_916_000 picoseconds. - Weight::from_parts(20_220_000, 3675) - // Standard Error: 7_298 - .saturating_add(Weight::from_parts(12_553_976, 0).saturating_mul(c.into())) + // Minimum execution time: 18_728_000 picoseconds. + Weight::from_parts(18_982_000, 3675) + // Standard Error: 11_708 + .saturating_add(Weight::from_parts(14_363_570, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -598,10 +594,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `522 + a * (86 ±0)` // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 20_322_000 picoseconds. 
- Weight::from_parts(20_744_000, 3675) - // Standard Error: 12_314 - .saturating_add(Weight::from_parts(14_767_353, 0).saturating_mul(a.into())) + // Minimum execution time: 18_611_000 picoseconds. + Weight::from_parts(18_970_000, 3675) + // Standard Error: 13_224 + .saturating_add(Weight::from_parts(16_397_299, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -616,8 +612,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 15_668_000 picoseconds. - Weight::from_parts(16_016_000, 3675) + // Minimum execution time: 14_504_000 picoseconds. + Weight::from_parts(14_906_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -629,8 +625,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 28_227_000 picoseconds. - Weight::from_parts(28_769_000, 3675) + // Minimum execution time: 26_653_000 picoseconds. + Weight::from_parts(27_260_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -642,8 +638,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 34_672_000 picoseconds. - Weight::from_parts(34_902_000, 3675) + // Minimum execution time: 33_625_000 picoseconds. + Weight::from_parts(34_474_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -657,8 +653,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 49_003_000 picoseconds. 
- Weight::from_parts(49_345_000, 6208) + // Minimum execution time: 47_609_000 picoseconds. + Weight::from_parts(48_476_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -672,8 +668,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 43_429_000 picoseconds. - Weight::from_parts(43_936_000, 6208) + // Minimum execution time: 41_625_000 picoseconds. + Weight::from_parts(43_030_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -687,8 +683,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 49_177_000 picoseconds. - Weight::from_parts(49_548_000, 6208) + // Minimum execution time: 47_661_000 picoseconds. + Weight::from_parts(48_469_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -700,8 +696,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 19_323_000 picoseconds. - Weight::from_parts(19_945_000, 3675) + // Minimum execution time: 17_727_000 picoseconds. + Weight::from_parts(18_384_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -713,8 +709,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 19_543_000 picoseconds. - Weight::from_parts(19_747_000, 3675) + // Minimum execution time: 17_657_000 picoseconds. 
+ Weight::from_parts(18_282_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -724,8 +720,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 15_623_000 picoseconds. - Weight::from_parts(15_833_000, 3675) + // Minimum execution time: 13_743_000 picoseconds. + Weight::from_parts(14_193_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -735,8 +731,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 15_396_000 picoseconds. - Weight::from_parts(15_704_000, 3675) + // Minimum execution time: 13_653_000 picoseconds. + Weight::from_parts(14_263_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -748,8 +744,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 17_205_000 picoseconds. - Weight::from_parts(17_546_000, 3675) + // Minimum execution time: 15_328_000 picoseconds. + Weight::from_parts(16_042_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -759,8 +755,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 16_049_000 picoseconds. - Weight::from_parts(16_317_000, 3675) + // Minimum execution time: 14_097_000 picoseconds. + Weight::from_parts(14_641_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -770,16 +766,12 @@ impl WeightInfo for () { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) /// The range of component `n` is `[0, 50]`. 
/// The range of component `s` is `[0, 50]`. - fn set_metadata(n: u32, s: u32, ) -> Weight { + fn set_metadata(_n: u32, _s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 31_574_000 picoseconds. - Weight::from_parts(32_447_787, 3675) - // Standard Error: 904 - .saturating_add(Weight::from_parts(653, 0).saturating_mul(n.into())) - // Standard Error: 904 - .saturating_add(Weight::from_parts(271, 0).saturating_mul(s.into())) + // Minimum execution time: 29_535_000 picoseconds. + Weight::from_parts(31_456_892, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -791,8 +783,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 31_865_000 picoseconds. - Weight::from_parts(32_160_000, 3675) + // Minimum execution time: 30_680_000 picoseconds. + Weight::from_parts(31_930_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -802,16 +794,14 @@ impl WeightInfo for () { /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(n: u32, s: u32, ) -> Weight { + fn force_set_metadata(_n: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `190` // Estimated: `3675` - // Minimum execution time: 16_203_000 picoseconds. - Weight::from_parts(16_432_499, 3675) - // Standard Error: 1_563 - .saturating_add(Weight::from_parts(5_818, 0).saturating_mul(n.into())) - // Standard Error: 1_563 - .saturating_add(Weight::from_parts(9_660, 0).saturating_mul(s.into())) + // Minimum execution time: 14_660_000 picoseconds. 
+ Weight::from_parts(15_718_387, 3675) + // Standard Error: 622 + .saturating_add(Weight::from_parts(2_640, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -823,8 +813,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 33_443_000 picoseconds. - Weight::from_parts(56_533_000, 3675) + // Minimum execution time: 30_853_000 picoseconds. + Weight::from_parts(31_483_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -834,8 +824,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 20_636_000 picoseconds. - Weight::from_parts(23_960_000, 3675) + // Minimum execution time: 13_632_000 picoseconds. + Weight::from_parts(14_077_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -847,8 +837,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 35_987_000 picoseconds. - Weight::from_parts(36_429_000, 3675) + // Minimum execution time: 33_780_000 picoseconds. + Weight::from_parts(34_533_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -864,8 +854,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `668` // Estimated: `6208` - // Minimum execution time: 68_059_000 picoseconds. - Weight::from_parts(69_845_000, 6208) + // Minimum execution time: 67_712_000 picoseconds. 
+ Weight::from_parts(69_946_000, 6208) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -877,8 +867,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 38_066_000 picoseconds. - Weight::from_parts(38_450_000, 3675) + // Minimum execution time: 36_668_000 picoseconds. + Weight::from_parts(37_637_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -890,8 +880,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 38_500_000 picoseconds. - Weight::from_parts(38_953_000, 3675) + // Minimum execution time: 36_685_000 picoseconds. + Weight::from_parts(37_950_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -901,8 +891,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 16_268_000 picoseconds. - Weight::from_parts(16_764_000, 3675) + // Minimum execution time: 14_466_000 picoseconds. + Weight::from_parts(14_924_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -916,8 +906,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `453` // Estimated: `3675` - // Minimum execution time: 37_468_000 picoseconds. - Weight::from_parts(37_957_000, 3675) + // Minimum execution time: 34_874_000 picoseconds. + Weight::from_parts(36_330_000, 3675) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -929,10 +919,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 383_408_000 picoseconds. 
- Weight::from_parts(392_036_000, 3675) - .saturating_add(RocksDbWeight::get().reads(2)) - .saturating_add(RocksDbWeight::get().writes(2)) + // Minimum execution time: 33_278_000 picoseconds. + Weight::from_parts(34_104_000, 3675) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: Assets Account (r:1 w:1) /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) @@ -944,8 +934,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `579` // Estimated: `3675` - // Minimum execution time: 34_066_000 picoseconds. - Weight::from_parts(34_347_000, 3675) + // Minimum execution time: 32_898_000 picoseconds. + Weight::from_parts(33_489_000, 3675) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -957,8 +947,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3675` - // Minimum execution time: 32_060_000 picoseconds. - Weight::from_parts(32_519_000, 3675) + // Minimum execution time: 31_243_000 picoseconds. + Weight::from_parts(31_909_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -970,8 +960,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 115_000_000 picoseconds. - Weight::from_parts(163_000_000, 3675) + // Minimum execution time: 17_692_000 picoseconds. 
+ Weight::from_parts(18_253_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml index fb0f9c0e47957..bf5018250d8c9 100644 --- a/frame/atomic-swap/Cargo.toml +++ b/frame/atomic-swap/Cargo.toml @@ -13,14 +13,14 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } @@ -36,5 +36,11 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git 
a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index ed89a88698a3c..8094c06030120 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -50,6 +50,7 @@ use frame_support::{ weights::Weight, RuntimeDebugNoBound, }; +use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::RuntimeDebug; @@ -69,7 +70,7 @@ pub struct PendingSwap { /// Action of this swap. pub action: T::SwapAction, /// End block of the lock. - pub end_block: T::BlockNumber, + pub end_block: BlockNumberFor, } /// Hashed proof type. @@ -184,7 +185,7 @@ pub mod pallet { } #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::storage] pub type PendingSwaps = StorageDoubleMap< @@ -249,7 +250,7 @@ pub mod pallet { target: T::AccountId, hashed_proof: HashedProof, action: T::SwapAction, - duration: T::BlockNumber, + duration: BlockNumberFor, ) -> DispatchResult { let source = ensure_signed(origin)?; ensure!( diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index 7437d62a99c95..858417e8007fb 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -6,20 +6,16 @@ use crate as pallet_atomic_swap; use frame_support::traits::{ConstU32, ConstU64}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, AtomicSwap: pallet_atomic_swap::{Pallet, Call, Event}, } @@ -31,14 +27,13 @@ impl frame_system::Config 
for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -64,7 +59,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -78,7 +73,7 @@ const A: u64 = 1; const B: u64 = 2; pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let genesis = pallet_balances::GenesisConfig:: { balances: vec![(A, 100), (B, 200)] }; genesis.assimilate_storage(&mut t).unwrap(); t.into() diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index ee6465ad3967f..99c8c2fb497f0 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -13,19 +13,19 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = 
"../../primitives/application-crypto" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/aura" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } [features] default = ["std"] @@ -39,5 +39,13 @@ std = [ "sp-consensus-aura/std", "sp-runtime/std", "sp-std/std", + "sp-core/std", + "sp-io/std" ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime" +] +experimental = [] diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs index 12b7ee3f5fedc..641d5fc47e90e 100644 --- a/frame/aura/src/lib.rs +++ b/frame/aura/src/lib.rs @@ -60,6 +60,23 @@ pub use pallet::*; const LOG_TARGET: &str = "runtime::aura"; +/// A slot duration provider which infers the slot duration from the +/// [`pallet_timestamp::Config::MinimumPeriod`] by multiplying it by two, to ensure +/// that authors have the majority of their slot to author within. +/// +/// This was the default behavior of the Aura pallet and may be used for +/// backwards compatibility. +/// +/// Note that this type is likely not useful without the `experimental` +/// feature. 
+pub struct MinimumPeriodTimesTwo(sp_std::marker::PhantomData); + +impl Get for MinimumPeriodTimesTwo { + fn get() -> T::Moment { + ::MinimumPeriod::get().saturating_mul(2u32.into()) + } +} + #[frame_support::pallet] pub mod pallet { use super::*; @@ -81,6 +98,30 @@ pub mod pallet { /// Blocks authored by a disabled validator will lead to a panic as part of this module's /// initialization. type DisabledValidators: DisabledValidators; + + /// Whether to allow block authors to create multiple blocks per slot. + /// + /// If this is `true`, the pallet will allow slots to stay the same across sequential + /// blocks. If this is `false`, the pallet will require that subsequent blocks always have + /// higher slots than previous ones. + /// + /// Regardless of the setting of this storage value, the pallet will always enforce the + /// invariant that slots don't move backwards as the chain progresses. + /// + /// The typical value for this should be 'false' unless this pallet is being augmented by + /// another pallet which enforces some limitation on the number of blocks authors can create + /// using the same slot. + type AllowMultipleBlocksPerSlot: Get; + + /// The slot duration Aura should run with, expressed in milliseconds. + /// The effective value of this type should not change while the chain is running. + /// + /// For backwards compatibility either use [`MinimumPeriodTimesTwo`] or a const. + /// + /// This associated type is only present when compiled with the `experimental` + /// feature. 
+ #[cfg(feature = "experimental")] + type SlotDuration: Get<::Moment>; } #[pallet::pallet] @@ -88,11 +129,16 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { - fn on_initialize(_: T::BlockNumber) -> Weight { + fn on_initialize(_: BlockNumberFor) -> Weight { if let Some(new_slot) = Self::current_slot_from_digests() { let current_slot = CurrentSlot::::get(); - assert!(current_slot < new_slot, "Slot must increase"); + if T::AllowMultipleBlocksPerSlot::get() { + assert!(current_slot <= new_slot, "Slot must not decrease"); + } else { + assert!(current_slot < new_slot, "Slot must increase"); + } + CurrentSlot::::put(new_slot); if let Some(n_authorities) = >::decode_len() { @@ -113,6 +159,11 @@ pub mod pallet { T::DbWeight::get().reads(1) } } + + #[cfg(feature = "try-runtime")] + fn try_state(_: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() + } } /// The current authority set. @@ -135,7 +186,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { Pallet::::initialize_authorities(&self.authorities); } @@ -194,9 +245,66 @@ impl Pallet { /// Determine the Aura slot-duration based on the Timestamp module configuration. pub fn slot_duration() -> T::Moment { - // we double the minimum block-period so each author can always propose within - // the majority of its slot. - ::MinimumPeriod::get().saturating_mul(2u32.into()) + #[cfg(feature = "experimental")] + { + T::SlotDuration::get() + } + + #[cfg(not(feature = "experimental"))] + { + // we double the minimum block-period so each author can always propose within + // the majority of its slot. + ::MinimumPeriod::get().saturating_mul(2u32.into()) + } + } + + /// Ensure the correctness of the state of this pallet. + /// + /// This should be valid before or after each state transition of this pallet. 
+ /// + /// # Invariants + /// + /// ## `CurrentSlot` + /// + /// If we don't allow for multiple blocks per slot, then the current slot must be less than the + /// maximal slot number. Otherwise, it can be arbitrary. + /// + /// ## `Authorities` + /// + /// * The authorities must be non-empty. + /// * The current authority cannot be disabled. + /// * The number of authorities must be less than or equal to `T::MaxAuthorities`. This however, + /// is guarded by the type system. + #[cfg(any(test, feature = "try-runtime"))] + pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + // We don't have any guarantee that we are already after `on_initialize` and thus we have to + // check the current slot from the digest or take the last known slot. + let current_slot = + Self::current_slot_from_digests().unwrap_or_else(|| CurrentSlot::::get()); + + // Check that the current slot is less than the maximal slot number, unless we allow for + // multiple blocks per slot. + if !T::AllowMultipleBlocksPerSlot::get() { + frame_support::ensure!( + current_slot < u64::MAX, + "Current slot has reached maximum value and cannot be incremented further.", + ); + } + + let authorities_len = + >::decode_len().ok_or("Failed to decode authorities length")?; + + // Check that the authorities are non-empty. + frame_support::ensure!(!authorities_len.is_zero(), "Authorities must be non-empty."); + + // Check that the current authority is not disabled. 
+ let authority_index = *current_slot % authorities_len as u64; + frame_support::ensure!( + !T::DisabledValidators::is_disabled(authority_index as u32), + "Current validator is disabled and should not be attempting to author blocks.", + ); + + Ok(()) } } diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index 72d11a0749933..39b798c2f6841 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -22,25 +22,20 @@ use crate as pallet_aura; use frame_support::{ parameter_types, - traits::{ConstU32, ConstU64, DisabledValidators, GenesisBuild}, + traits::{ConstU32, ConstU64, DisabledValidators}, }; use sp_consensus_aura::{ed25519::AuthorityId, AuthorityIndex}; use sp_core::H256; -use sp_runtime::{ - testing::{Header, UintAuthorityId}, - traits::IdentityLookup, -}; +use sp_runtime::{testing::UintAuthorityId, traits::IdentityLookup, BuildStorage}; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; +const SLOT_DURATION: u64 = 2; + frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Aura: pallet_aura::{Pallet, Storage, Config}, } @@ -52,14 +47,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -76,12 +70,13 @@ impl frame_system::Config for Test { impl pallet_timestamp::Config 
for Test { type Moment = u64; type OnTimestampSet = Aura; - type MinimumPeriod = ConstU64<1>; + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; type WeightInfo = (); } parameter_types! { static DisabledValidatorTestValue: Vec = Default::default(); + pub static AllowMultipleBlocksPerSlot: bool = false; } pub struct MockDisabledValidators; @@ -106,14 +101,26 @@ impl pallet_aura::Config for Test { type AuthorityId = AuthorityId; type DisabledValidators = MockDisabledValidators; type MaxAuthorities = ConstU32<10>; + type AllowMultipleBlocksPerSlot = AllowMultipleBlocksPerSlot; + + #[cfg(feature = "experimental")] + type SlotDuration = ConstU64; } -pub fn new_test_ext(authorities: Vec) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); +fn build_ext(authorities: Vec) -> sp_io::TestExternalities { + let mut storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_aura::GenesisConfig:: { authorities: authorities.into_iter().map(|a| UintAuthorityId(a).to_public_key()).collect(), } - .assimilate_storage(&mut t) + .assimilate_storage(&mut storage) .unwrap(); - t.into() + storage.into() +} + +pub fn build_ext_and_execute_test(authorities: Vec, test: impl FnOnce() -> ()) { + let mut ext = build_ext(authorities); + ext.execute_with(|| { + test(); + Aura::do_try_state().expect("Storage invariants should hold") + }); } diff --git a/frame/aura/src/tests.rs b/frame/aura/src/tests.rs index 1ed937a7745fe..d3ce877d3e60d 100644 --- a/frame/aura/src/tests.rs +++ b/frame/aura/src/tests.rs @@ -19,7 +19,7 @@ #![cfg(test)] -use crate::mock::{new_test_ext, Aura, MockDisabledValidators, System}; +use crate::mock::{build_ext_and_execute_test, Aura, MockDisabledValidators, System}; use codec::Encode; use frame_support::traits::OnInitialize; use sp_consensus_aura::{Slot, AURA_ENGINE_ID}; @@ -27,7 +27,7 @@ use sp_runtime::{Digest, DigestItem}; #[test] fn initial_values() { - new_test_ext(vec![0, 1, 2, 
3]).execute_with(|| { + build_ext_and_execute_test(vec![0, 1, 2, 3], || { assert_eq!(Aura::current_slot(), 0u64); assert_eq!(Aura::authorities().len(), 4); }); @@ -38,7 +38,7 @@ fn initial_values() { expected = "Validator with index 1 is disabled and should not be attempting to author blocks." )] fn disabled_validators_cannot_author_blocks() { - new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { + build_ext_and_execute_test(vec![0, 1, 2, 3], || { // slot 1 should be authored by validator at index 1 let slot = Slot::from(1); let pre_digest = @@ -54,3 +54,64 @@ fn disabled_validators_cannot_author_blocks() { Aura::on_initialize(42); }); } + +#[test] +#[should_panic(expected = "Slot must increase")] +fn pallet_requires_slot_to_increase_unless_allowed() { + build_ext_and_execute_test(vec![0, 1, 2, 3], || { + crate::mock::AllowMultipleBlocksPerSlot::set(false); + + let slot = Slot::from(1); + let pre_digest = + Digest { logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot.encode())] }; + + System::reset_events(); + System::initialize(&42, &System::parent_hash(), &pre_digest); + + // and we should not be able to initialize the block with the same slot a second time. + Aura::on_initialize(42); + Aura::on_initialize(42); + }); +} + +#[test] +fn pallet_can_allow_unchanged_slot() { + build_ext_and_execute_test(vec![0, 1, 2, 3], || { + let slot = Slot::from(1); + let pre_digest = + Digest { logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot.encode())] }; + + System::reset_events(); + System::initialize(&42, &System::parent_hash(), &pre_digest); + + crate::mock::AllowMultipleBlocksPerSlot::set(true); + + // and we should be able to initialize the block with the same slot a second time. 
+ Aura::on_initialize(42); + Aura::on_initialize(42); + }); +} + +#[test] +#[should_panic(expected = "Slot must not decrease")] +fn pallet_always_rejects_decreasing_slot() { + build_ext_and_execute_test(vec![0, 1, 2, 3], || { + let slot = Slot::from(2); + let pre_digest = + Digest { logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot.encode())] }; + + System::reset_events(); + System::initialize(&42, &System::parent_hash(), &pre_digest); + + crate::mock::AllowMultipleBlocksPerSlot::set(true); + + Aura::on_initialize(42); + System::finalize(); + + let earlier_slot = Slot::from(1); + let pre_digest = + Digest { logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, earlier_slot.encode())] }; + System::initialize(&43, &System::parent_hash(), &pre_digest); + Aura::on_initialize(43); + }); +} diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml index d8693bd85bffb..4e5025d7c770e 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } @@ -22,14 +22,14 @@ frame-system = { version = "4.0.0-dev", default-features = false, path = "../sys pallet-session = { version = "4.0.0-dev", default-features = false, features = [ "historical", ], path = "../session" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-authority-discovery = { version = "4.0.0-dev", default-features = false, path = "../../primitives/authority-discovery" } 
-sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } [features] default = ["std"] @@ -43,5 +43,12 @@ std = [ "sp-authority-discovery/std", "sp-runtime/std", "sp-std/std", + "sp-core/std", + "sp-io/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-session/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index 6365c95359472..87b743ae19677 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -59,14 +59,16 @@ pub mod pallet { pub(super) type NextKeys = StorageValue<_, WeakBoundedVec, ValueQuery>; - #[derive(Default)] + #[derive(frame_support::DefaultNoBound)] #[pallet::genesis_config] - pub struct GenesisConfig { + pub struct GenesisConfig { pub keys: Vec, + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { Pallet::::initialize_keys(&self.keys) } @@ -168,30 +170,26 @@ mod tests { use crate as pallet_authority_discovery; use frame_support::{ parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild}, + traits::{ConstU32, ConstU64}, }; use sp_application_crypto::Pair; use sp_authority_discovery::AuthorityPair; use 
sp_core::{crypto::key_types, H256}; use sp_io::TestExternalities; use sp_runtime::{ - testing::{Header, UintAuthorityId}, + testing::UintAuthorityId, traits::{ConvertInto, IdentityLookup, OpaqueKeys}, - KeyTypeId, Perbill, + BuildStorage, KeyTypeId, Perbill, }; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Session: pallet_session::{Pallet, Call, Storage, Event, Config}, - AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config}, + AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config}, } ); @@ -233,14 +231,13 @@ mod tests { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = BlockNumber; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AuthorityId; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -308,13 +305,11 @@ mod tests { .collect::>(); // Build genesis. - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - GenesisBuild::::assimilate_storage( - &pallet_authority_discovery::GenesisConfig { keys: vec![] }, - &mut t, - ) - .unwrap(); + pallet_authority_discovery::GenesisConfig:: { keys: vec![], ..Default::default() } + .assimilate_storage(&mut t) + .unwrap(); // Create externalities. 
let mut externalities = TestExternalities::new(t); diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index 6d8cdd9371e94..14329367b0905 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -13,19 +13,19 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } impl-trait-for-tuples = "0.2.2" scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } [features] default = ["std"] @@ -36,5 +36,11 @@ std = [ "scale-info/std", "sp-runtime/std", "sp-std/std", + "sp-core/std", + "sp-io/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 4bb8ba587ac8b..a9bd0c38cb67c 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -45,7 +45,7 @@ pub mod pallet { /// Find 
the author of a block. type FindAuthor: FindAuthor; /// An event handler for authored blocks. - type EventHandler: EventHandler; + type EventHandler: EventHandler>; } #[pallet::pallet] @@ -53,7 +53,7 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { - fn on_initialize(_: T::BlockNumber) -> Weight { + fn on_initialize(_: BlockNumberFor) -> Weight { if let Some(author) = Self::author() { T::EventHandler::note_author(author); } @@ -61,7 +61,7 @@ pub mod pallet { Weight::zero() } - fn on_finalize(_: T::BlockNumber) { + fn on_finalize(_: BlockNumberFor) { // ensure we never go to trie with these values. >::kill(); } @@ -106,18 +106,15 @@ mod tests { generic::DigestItem, testing::Header, traits::{BlakeTwo256, Header as HeaderT, IdentityLookup}, + BuildStorage, }; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Authorship: pallet_authorship::{Pallet, Storage}, } ); @@ -128,14 +125,13 @@ mod tests { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -188,7 +184,7 @@ mod tests { } fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); t.into() } diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index 
ff9e4b3aeac43..34cc6cc09baad 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -13,23 +13,23 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-authorship = { version = "4.0.0-dev", default-features = false, path = "../authorship" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../session" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } +sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe", features = 
["serde"] } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core", features = ["serde"] } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime", features = ["serde"] } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } -sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking", features = ["serde"] } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } @@ -37,7 +37,7 @@ pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-offences = { version = "4.0.0-dev", path = "../offences" } pallet-staking = { version = "4.0.0-dev", path = "../staking" } pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } [features] default = ["std"] @@ -59,6 +59,32 @@ std = [ "sp-session/std", "sp-staking/std", "sp-std/std", + "frame-election-provider-support/std", + "pallet-balances/std", + "pallet-offences/std", + "pallet-staking/std" +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-election-provider-support/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-offences/runtime-benchmarks", + "pallet-staking/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + 
"sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-election-provider-support/try-runtime", + "frame-system/try-runtime", + "pallet-authorship/try-runtime", + "pallet-balances/try-runtime", + "pallet-offences/try-runtime", + "pallet-session/try-runtime", + "pallet-staking/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime" ] -runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/babe/src/default_weights.rs b/frame/babe/src/default_weights.rs index 2e880fd67cc22..1f7de2b28c252 100644 --- a/frame/babe/src/default_weights.rs +++ b/frame/babe/src/default_weights.rs @@ -28,15 +28,11 @@ impl crate::WeightInfo for () { DbWeight::get().writes(1) } - fn report_equivocation(validator_count: u32) -> Weight { + fn report_equivocation(validator_count: u32, max_nominators_per_validator: u32) -> Weight { // we take the validator set count from the membership proof to // calculate the weight but we set a floor of 100 validators. 
let validator_count = validator_count.max(100) as u64; - // worst case we are considering is that the given offender - // is backed by 200 nominators - const MAX_NOMINATORS: u64 = 200; - // checking membership proof Weight::from_parts(35u64 * WEIGHT_REF_TIME_PER_MICROS, 0) .saturating_add( @@ -49,10 +45,10 @@ impl crate::WeightInfo for () { // report offence .saturating_add(Weight::from_parts(110u64 * WEIGHT_REF_TIME_PER_MICROS, 0)) .saturating_add(Weight::from_parts( - 25u64 * WEIGHT_REF_TIME_PER_MICROS * MAX_NOMINATORS, + 25u64 * WEIGHT_REF_TIME_PER_MICROS * max_nominators_per_validator as u64, 0, )) - .saturating_add(DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) - .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)) + .saturating_add(DbWeight::get().reads(14 + 3 * max_nominators_per_validator as u64)) + .saturating_add(DbWeight::get().writes(10 + 3 * max_nominators_per_validator as u64)) } } diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index 3a14cacc905d2..ed1df640583b2 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -34,6 +34,7 @@ //! definition. use frame_support::traits::{Get, KeyOwnerProofSystem}; +use frame_system::pallet_prelude::HeaderFor; use log::{error, info}; use sp_consensus_babe::{AuthorityId, EquivocationProof, Slot, KEY_TYPE}; @@ -99,14 +100,14 @@ impl Offence for EquivocationOffence { /// /// This type implements `OffenceReportSystem` such that: /// - Equivocation reports are published on-chain as unsigned extrinsic via -/// `offchain::SendTransactioinsTypes`. +/// `offchain::SendTransactionTypes`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. /// - Offence reporter for unsigned transactions is fetched via the the authorship pallet. 
pub struct EquivocationReportSystem(sp_std::marker::PhantomData<(T, R, P, L)>); impl - OffenceReportSystem, (EquivocationProof, T::KeyOwnerProof)> + OffenceReportSystem, (EquivocationProof>, T::KeyOwnerProof)> for EquivocationReportSystem where T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, @@ -122,7 +123,7 @@ where type Longevity = L; fn publish_evidence( - evidence: (EquivocationProof, T::KeyOwnerProof), + evidence: (EquivocationProof>, T::KeyOwnerProof), ) -> Result<(), ()> { use frame_system::offchain::SubmitTransaction; let (equivocation_proof, key_owner_proof) = evidence; @@ -140,7 +141,7 @@ where } fn check_evidence( - evidence: (EquivocationProof, T::KeyOwnerProof), + evidence: (EquivocationProof>, T::KeyOwnerProof), ) -> Result<(), TransactionValidityError> { let (equivocation_proof, key_owner_proof) = evidence; @@ -159,7 +160,7 @@ where fn process_evidence( reporter: Option, - evidence: (EquivocationProof, T::KeyOwnerProof), + evidence: (EquivocationProof>, T::KeyOwnerProof), ) -> Result<(), DispatchError> { let (equivocation_proof, key_owner_proof) = evidence; let reporter = reporter.or_else(|| >::author()); diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 8001450b43583..9549fac9fe2b6 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -29,6 +29,7 @@ use frame_support::{ weights::Weight, BoundedVec, WeakBoundedVec, }; +use frame_system::pallet_prelude::{BlockNumberFor, HeaderFor}; use sp_consensus_babe::{ digests::{NextConfigDescriptor, NextEpochDescriptor, PreDigest}, AllowedSlots, BabeAuthorityWeight, BabeEpochConfiguration, ConsensusLog, Epoch, @@ -71,14 +72,14 @@ pub use pallet::*; pub trait WeightInfo { fn plan_config_change() -> Weight; - fn report_equivocation(validator_count: u32) -> Weight; + fn report_equivocation(validator_count: u32, max_nominators_per_validator: u32) -> Weight; } /// Trigger an epoch change, if any should take place. 
pub trait EpochChangeTrigger { /// Trigger an epoch change, if any should take place. This should be called /// during every block, after initialization is done. - fn trigger(now: T::BlockNumber); + fn trigger(now: BlockNumberFor); } /// A type signifying to BABE that an external trigger @@ -86,7 +87,7 @@ pub trait EpochChangeTrigger { pub struct ExternalTrigger; impl EpochChangeTrigger for ExternalTrigger { - fn trigger(_: T::BlockNumber) {} // nothing - trigger is external. + fn trigger(_: BlockNumberFor) {} // nothing - trigger is external. } /// A type signifying to BABE that it should perform epoch changes @@ -94,7 +95,7 @@ impl EpochChangeTrigger for ExternalTrigger { pub struct SameAuthoritiesForever; impl EpochChangeTrigger for SameAuthoritiesForever { - fn trigger(now: T::BlockNumber) { + fn trigger(now: BlockNumberFor) { if >::should_epoch_change(now) { let authorities = >::authorities(); let next_authorities = authorities.clone(); @@ -152,6 +153,10 @@ pub mod pallet { #[pallet::constant] type MaxAuthorities: Get; + /// The maximum number of nominators for each validator. + #[pallet::constant] + type MaxNominators: Get; + /// The proof of key ownership, used for validating equivocation reports. /// The proof must include the session index and validator count of the /// session at which the equivocation occurred. @@ -162,7 +167,7 @@ pub mod pallet { /// (from an offchain context). type EquivocationReportSystem: OffenceReportSystem< Option, - (EquivocationProof, Self::KeyOwnerProof), + (EquivocationProof>, Self::KeyOwnerProof), >; } @@ -279,7 +284,7 @@ pub mod pallet { /// slots, which may be skipped, the block numbers may not line up with the slot numbers. #[pallet::storage] pub(super) type EpochStart = - StorageValue<_, (T::BlockNumber, T::BlockNumber), ValueQuery>; + StorageValue<_, (BlockNumberFor, BlockNumberFor), ValueQuery>; /// How late the current block is compared to its parent. 
/// @@ -288,7 +293,7 @@ pub mod pallet { /// execution context should always yield zero. #[pallet::storage] #[pallet::getter(fn lateness)] - pub(super) type Lateness = StorageValue<_, T::BlockNumber, ValueQuery>; + pub(super) type Lateness = StorageValue<_, BlockNumberFor, ValueQuery>; /// The configuration for the current epoch. Should never be `None` as it is initialized in /// genesis. @@ -314,15 +319,17 @@ pub mod pallet { pub(super) type SkippedEpochs = StorageValue<_, BoundedVec<(u64, SessionIndex), ConstU32<100>>, ValueQuery>; - #[derive(Default)] + #[derive(frame_support::DefaultNoBound)] #[pallet::genesis_config] - pub struct GenesisConfig { + pub struct GenesisConfig { pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, pub epoch_config: Option, + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { SegmentIndex::::put(0); Pallet::::initialize_genesis_authorities(&self.authorities); @@ -404,10 +411,11 @@ pub mod pallet { #[pallet::call_index(0)] #[pallet::weight(::WeightInfo::report_equivocation( key_owner_proof.validator_count(), + T::MaxNominators::get(), ))] pub fn report_equivocation( origin: OriginFor, - equivocation_proof: Box>, + equivocation_proof: Box>>, key_owner_proof: T::KeyOwnerProof, ) -> DispatchResultWithPostInfo { let reporter = ensure_signed(origin)?; @@ -430,10 +438,11 @@ pub mod pallet { #[pallet::call_index(1)] #[pallet::weight(::WeightInfo::report_equivocation( key_owner_proof.validator_count(), + T::MaxNominators::get(), ))] pub fn report_equivocation_unsigned( origin: OriginFor, - equivocation_proof: Box>, + equivocation_proof: Box>>, key_owner_proof: T::KeyOwnerProof, ) -> DispatchResultWithPostInfo { ensure_none(origin)?; @@ -503,8 +512,8 @@ impl IsMember for Pallet { } } -impl pallet_session::ShouldEndSession for Pallet { - fn should_end_session(now: T::BlockNumber) -> bool { +impl 
pallet_session::ShouldEndSession> for Pallet { + fn should_end_session(now: BlockNumberFor) -> bool { // it might be (and it is in current implementation) that session module is calling // `should_end_session` from it's own `on_initialize` handler, in which case it's // possible that babe's own `on_initialize` has not run yet, so let's ensure that we @@ -524,7 +533,7 @@ impl Pallet { /// Determine whether an epoch change should take place at this block. /// Assumes that initialization has already taken place. - pub fn should_epoch_change(now: T::BlockNumber) -> bool { + pub fn should_epoch_change(now: BlockNumberFor) -> bool { // The epoch has technically ended during the passage of time // between this block and the last, but we have to "end" the epoch now, // since there is no earlier possible block we could have done it. @@ -554,11 +563,11 @@ impl Pallet { // // WEIGHT NOTE: This function is tied to the weight of `EstimateNextSessionRotation`. If you // update this function, you must also update the corresponding weight. - pub fn next_expected_epoch_change(now: T::BlockNumber) -> Option { + pub fn next_expected_epoch_change(now: BlockNumberFor) -> Option> { let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); next_slot.checked_sub(*CurrentSlot::::get()).map(|slots_remaining| { // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. - let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); + let blocks_remaining: BlockNumberFor = slots_remaining.saturated_into(); now.saturating_add(blocks_remaining) }) } @@ -776,7 +785,7 @@ impl Pallet { Self::deposit_consensus(ConsensusLog::NextEpochData(next)); } - fn initialize(now: T::BlockNumber) { + fn initialize(now: BlockNumberFor) { // since `initialize` can be called twice (e.g. 
if session module is present) // let's ensure that we only do the initialization once per block let initialized = Self::initialized().is_some(); @@ -811,7 +820,7 @@ impl Pallet { // how many slots were skipped between current and last block let lateness = current_slot.saturating_sub(CurrentSlot::::get() + 1); - let lateness = T::BlockNumber::from(*lateness as u32); + let lateness = BlockNumberFor::::from(*lateness as u32); Lateness::::put(lateness); CurrentSlot::::put(current_slot); @@ -877,7 +886,7 @@ impl Pallet { /// will push the transaction to the pool. Only useful in an offchain /// context. pub fn submit_unsigned_equivocation_report( - equivocation_proof: EquivocationProof, + equivocation_proof: EquivocationProof>, key_owner_proof: T::KeyOwnerProof, ) -> Option<()> { T::EquivocationReportSystem::publish_evidence((equivocation_proof, key_owner_proof)).ok() @@ -899,12 +908,14 @@ impl OnTimestampSet for Pallet { } } -impl frame_support::traits::EstimateNextSessionRotation for Pallet { - fn average_session_length() -> T::BlockNumber { +impl frame_support::traits::EstimateNextSessionRotation> + for Pallet +{ + fn average_session_length() -> BlockNumberFor { T::EpochDuration::get().saturated_into() } - fn estimate_current_session_progress(_now: T::BlockNumber) -> (Option, Weight) { + fn estimate_current_session_progress(_now: BlockNumberFor) -> (Option, Weight) { let elapsed = CurrentSlot::::get().saturating_sub(Self::current_epoch_start()) + 1; ( @@ -914,7 +925,9 @@ impl frame_support::traits::EstimateNextSessionRotation (Option, Weight) { + fn estimate_next_session_rotation( + now: BlockNumberFor, + ) -> (Option>, Weight) { ( Self::next_expected_epoch_change(now), // Read: Current Slot, Epoch Index, Genesis Slot @@ -923,8 +936,8 @@ impl frame_support::traits::EstimateNextSessionRotation frame_support::traits::Lateness for Pallet { - fn lateness(&self) -> T::BlockNumber { +impl frame_support::traits::Lateness> for Pallet { + fn lateness(&self) -> BlockNumberFor { 
Self::lateness() } } diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 4278fa4596a96..dbffe9f312e60 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -19,12 +19,16 @@ use crate::{self as pallet_babe, Config, CurrentSlot}; use codec::Encode; -use frame_election_provider_support::{onchain, SequentialPhragmen}; +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, SequentialPhragmen, +}; use frame_support::{ parameter_types, - traits::{ConstU128, ConstU32, ConstU64, GenesisBuild, KeyOwnerProofSystem, OnInitialize}, + traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; +use pallet_staking::FixedNominationsQuota; use sp_consensus_babe::{AuthorityId, AuthorityPair, Randomness, Slot, VrfSignature}; use sp_core::{ crypto::{KeyTypeId, Pair, VrfSecret}, @@ -36,20 +40,16 @@ use sp_runtime::{ impl_opaque_keys, testing::{Digest, DigestItem, Header, TestXt}, traits::{Header as _, IdentityLookup, OpaqueKeys}, - Perbill, + BuildStorage, Perbill, }; use sp_staking::{EraIndex, SessionIndex}; type DummyValidatorId = u64; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, Authorship: pallet_authorship, @@ -69,15 +69,14 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Version = (); type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = DummyValidatorId; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type 
BlockHashCount = ConstU64<250>; type PalletInfo = PalletInfo; @@ -145,7 +144,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -166,6 +165,7 @@ parameter_types! { pub const SlashDeferDuration: EraIndex = 0; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(16); + pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); } pub struct OnChainSeqPhragmen; @@ -175,14 +175,12 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = (); type MaxWinners = ConstU32<100>; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; + type Bounds = ElectionsBounds; } impl pallet_staking::Config for Test { - type MaxNominations = ConstU32<16>; type RewardRemainder = (); - type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type CurrencyToVote = (); type RuntimeEvent = RuntimeEvent; type Currency = Balances; type CurrencyBalance = ::Balance; @@ -202,9 +200,10 @@ impl pallet_staking::Config for Test { type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; type HistoryDepth = ConstU32<84>; - type OnStakerSlash = (); + type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } @@ -228,6 +227,7 @@ impl Config for Test { type DisabledValidators = Session; type WeightInfo = (); type MaxAuthorities = ConstU32<10>; + type MaxNominators = ConstU32<100>; type KeyOwnerProof = >::Proof; type EquivocationReportSystem = super::EquivocationReportSystem; @@ -345,7 +345,7 
@@ pub fn new_test_ext_with_pairs( } pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); diff --git a/frame/babe/src/randomness.rs b/frame/babe/src/randomness.rs index b9b24786b7a74..d3d1bea2292da 100644 --- a/frame/babe/src/randomness.rs +++ b/frame/babe/src/randomness.rs @@ -22,6 +22,7 @@ use super::{ AuthorVrfRandomness, Config, EpochStart, NextRandomness, Randomness, RANDOMNESS_LENGTH, }; use frame_support::traits::Randomness as RandomnessT; +use frame_system::pallet_prelude::BlockNumberFor; use sp_runtime::traits::{Hash, One, Saturating}; /// Randomness usable by consensus protocols that **depend** upon finality and take action @@ -129,8 +130,8 @@ pub struct ParentBlockRandomness(sp_std::marker::PhantomData); Please use `ParentBlockRandomness` instead.")] pub struct CurrentBlockRandomness(sp_std::marker::PhantomData); -impl RandomnessT for RandomnessFromTwoEpochsAgo { - fn random(subject: &[u8]) -> (T::Hash, T::BlockNumber) { +impl RandomnessT> for RandomnessFromTwoEpochsAgo { + fn random(subject: &[u8]) -> (T::Hash, BlockNumberFor) { let mut subject = subject.to_vec(); subject.reserve(RANDOMNESS_LENGTH); subject.extend_from_slice(&Randomness::::get()[..]); @@ -139,8 +140,8 @@ impl RandomnessT for RandomnessFromTwoEpochs } } -impl RandomnessT for RandomnessFromOneEpochAgo { - fn random(subject: &[u8]) -> (T::Hash, T::BlockNumber) { +impl RandomnessT> for RandomnessFromOneEpochAgo { + fn random(subject: &[u8]) -> (T::Hash, BlockNumberFor) { let mut subject = subject.to_vec(); subject.reserve(RANDOMNESS_LENGTH); subject.extend_from_slice(&NextRandomness::::get()[..]); @@ -149,8 +150,8 @@ impl RandomnessT for RandomnessFromOneEpochA } } -impl RandomnessT, T::BlockNumber> for 
ParentBlockRandomness { - fn random(subject: &[u8]) -> (Option, T::BlockNumber) { +impl RandomnessT, BlockNumberFor> for ParentBlockRandomness { + fn random(subject: &[u8]) -> (Option, BlockNumberFor) { let random = AuthorVrfRandomness::::get().map(|random| { let mut subject = subject.to_vec(); subject.reserve(RANDOMNESS_LENGTH); @@ -164,8 +165,8 @@ impl RandomnessT, T::BlockNumber> for ParentBlockRand } #[allow(deprecated)] -impl RandomnessT, T::BlockNumber> for CurrentBlockRandomness { - fn random(subject: &[u8]) -> (Option, T::BlockNumber) { +impl RandomnessT, BlockNumberFor> for CurrentBlockRandomness { + fn random(subject: &[u8]) -> (Option, BlockNumberFor) { let (random, _) = ParentBlockRandomness::::random(subject); (random, >::block_number()) } diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 38edc2af7f272..ae0c3e3873c50 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -815,7 +815,7 @@ fn report_equivocation_has_valid_weight() { // the weight depends on the size of the validator set, // but there's a lower bound of 100 validators. assert!((1..=100) - .map(::WeightInfo::report_equivocation) + .map(|validators| ::WeightInfo::report_equivocation(validators, 1000)) .collect::>() .windows(2) .all(|w| w[0] == w[1])); @@ -823,7 +823,7 @@ fn report_equivocation_has_valid_weight() { // after 100 validators the weight should keep increasing // with every extra validator. 
assert!((100..=1000) - .map(::WeightInfo::report_equivocation) + .map(|validators| ::WeightInfo::report_equivocation(validators, 1000)) .collect::>() .windows(2) .all(|w| w[0].ref_time() < w[1].ref_time())); diff --git a/frame/bags-list/Cargo.toml b/frame/bags-list/Cargo.toml index 1678ce1ba2ac6..222d64dc229f6 100644 --- a/frame/bags-list/Cargo.toml +++ b/frame/bags-list/Cargo.toml @@ -13,12 +13,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } # primitives -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } # FRAME frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } @@ -27,18 +27,20 @@ frame-election-provider-support = { version = "4.0.0-dev", default-features = fa # third party log = { version = "0.4.17", default-features = false } +docify = "0.2.1" +aquamarine = { version = "0.3.2" } # Optional imports for benchmarking frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking", optional = true, default-features = false } pallet-balances = { version = "4.0.0-dev", path = "../balances", optional = true, default-features = false } -sp-core = { version = "7.0.0", path = "../../primitives/core", optional = true, default-features = false } -sp-io = { version = "7.0.0", path = "../../primitives/io", optional = true, default-features = false } -sp-tracing = { version = "6.0.0", path = 
"../../primitives/tracing", optional = true, default-features = false } +sp-core = { version = "21.0.0", path = "../../primitives/core", optional = true, default-features = false } +sp-io = { version = "23.0.0", path = "../../primitives/io", optional = true, default-features = false } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing", optional = true, default-features = false } [dev-dependencies] -sp-core = { version = "7.0.0", path = "../../primitives/core"} -sp-io = { version = "7.0.0", path = "../../primitives/io"} -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-core = { version = "21.0.0", path = "../../primitives/core"} +sp-io = { version = "23.0.0", path = "../../primitives/io"} +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } @@ -67,6 +69,9 @@ runtime-benchmarks = [ "pallet-balances/runtime-benchmarks", "sp-tracing", "frame-election-provider-support/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" ] fuzz = [ "sp-core", @@ -75,4 +80,10 @@ fuzz = [ "sp-tracing", "frame-election-provider-support/fuzz", ] -try-runtime = [ "frame-support/try-runtime", "frame-election-provider-support/try-runtime" ] +try-runtime = [ + "frame-support/try-runtime", + "frame-election-provider-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances?/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/bags-list/remote-tests/Cargo.toml b/frame/bags-list/remote-tests/Cargo.toml index 6e951b43a4aeb..832a11d29d89e 100644 --- a/frame/bags-list/remote-tests/Cargo.toml +++ b/frame/bags-list/remote-tests/Cargo.toml @@ -21,11 +21,11 @@ frame-system = { path = "../../system", version = 
"4.0.0-dev" } frame-support = { path = "../../support", version = "4.0.0-dev" } # core -sp-storage = { path = "../../../primitives/storage", version = "7.0.0" } -sp-core = { path = "../../../primitives/core", version = "7.0.0" } -sp-tracing = { path = "../../../primitives/tracing", version = "6.0.0" } -sp-runtime = { path = "../../../primitives/runtime", version = "7.0.0" } -sp-std = { path = "../../../primitives/std", version = "5.0.0" } +sp-storage = { path = "../../../primitives/storage", version = "13.0.0" } +sp-core = { path = "../../../primitives/core", version = "21.0.0" } +sp-tracing = { path = "../../../primitives/tracing", version = "10.0.0" } +sp-runtime = { path = "../../../primitives/runtime", version = "24.0.0" } +sp-std = { path = "../../../primitives/std", version = "8.0.0" } # utils remote-externalities = { path = "../../../utils/frame/remote-externalities", version = "0.10.0-dev", package = "frame-remote-externalities" } diff --git a/frame/bags-list/remote-tests/src/snapshot.rs b/frame/bags-list/remote-tests/src/snapshot.rs index 0c6e194d32478..78c5b4e1c7b6d 100644 --- a/frame/bags-list/remote-tests/src/snapshot.rs +++ b/frame/bags-list/remote-tests/src/snapshot.rs @@ -16,7 +16,10 @@ //! Test to execute the snapshot using the voter bag. 
-use frame_election_provider_support::SortedListProvider; +use frame_election_provider_support::{ + bounds::{CountBound, DataProviderBounds}, + SortedListProvider, +}; use frame_support::traits::PalletInfoAccess; use remote_externalities::{Builder, Mode, OnlineConfig}; use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; @@ -62,8 +65,13 @@ where ::VoterList::count(), ); + let bounds = match voter_limit { + None => DataProviderBounds::default(), + Some(v) => DataProviderBounds { count: Some(CountBound(v as u32)), size: None }, + }; + let voters = - as ElectionDataProvider>::electing_voters(voter_limit) + as ElectionDataProvider>::electing_voters(bounds) .unwrap(); let mut voters_nominator_only = voters diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index d4d54b9a134bb..a5d3257b734bb 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -15,21 +15,52 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Bags-List Pallet +//! > Made with *Substrate*, for *Polkadot*. //! -//! A semi-sorted list, where items hold an `AccountId` based on some `Score`. The -//! `AccountId` (`id` for short) might be synonym to a `voter` or `nominator` in some context, and -//! `Score` signifies the chance of each id being included in the final -//! [`SortedListProvider::iter`]. +//! [![github]](https://github.com/paritytech/substrate/frame/fast-unstake) - +//! [![polkadot]](https://polkadot.network) //! -//! It implements [`frame_election_provider_support::SortedListProvider`] to provide a semi-sorted -//! list of accounts to another pallet. It needs some other pallet to give it some information about -//! the weights of accounts via [`frame_election_provider_support::ScoreProvider`]. +//! [polkadot]: +//! https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white +//! [github]: +//! 
https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github //! -//! This pallet is not configurable at genesis. Whoever uses it should call appropriate functions of -//! the `SortedListProvider` (e.g. `on_insert`, or `unsafe_regenerate`) at their genesis. +//! # Bags-List Pallet //! -//! # Goals +//! An onchain implementation of a semi-sorted linked list, with permissionless sorting and update +//! operations. +//! +//! ## Pallet API +//! +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, +//! including its configuration trait, dispatchables, storage items, events and errors. +//! +//! This pallet provides an implementation of +//! [`frame_election_provider_support::SortedListProvider`] and it can typically be used by another +//! pallet via this API. +//! +//! ## Overview +//! +//! This pallet splits `AccountId`s into different bags. Within a bag, these `AccountId`s are stored +//! as nodes in a linked-list manner. This pallet then provides iteration over all bags, which +//! basically allows an infinitely large list of items to be kept in a sorted manner. +//! +//! Each bags has a upper and lower range of scores, denoted by [`Config::BagThresholds`]. All nodes +//! within a bag must be within the range of the bag. If not, the permissionless [`Pallet::rebag`] +//! can be used to move any node to the right bag. +//! +//! Once a `rebag` happens, the order within a node is still not enforced. To move a node to the +//! optimal position in a bag, the [`Pallet::put_in_front_of`] or [`Pallet::put_in_front_of_other`] +//! can be used. +//! +//! Additional reading, about how this pallet is used in the context of Polkadot's staking system: +//! +//! +//! ## Examples +//! +//! See [`example`] for a diagram of `rebag` and `put_in_front_of` operations. +//! +//! ## Low Level / Implementation Details //! //! The data structure exposed by this pallet aims to be optimized for: //! @@ -37,7 +68,7 @@ //! 
- iteration over the top* N items by score, where the precise ordering of items doesn't //! particularly matter. //! -//! # Details +//! ### Further Details //! //! - items are kept in bags, which are delineated by their range of score (See //! [`Config::BagThresholds`]). @@ -53,12 +84,53 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(doc)] +#[cfg_attr(doc, aquamarine::aquamarine)] +/// +/// In this example, assuming each node has an equal id and score (eg. node 21 has a score of 21), +/// the node 22 can be moved from bag 1 to bag 0 with the `rebag` operation. +/// +/// Once the whole list is iterated, assuming the above above rebag happens, the order of iteration +/// would be: `25, 21, 22, 12, 22, 5, 7, 3`. +/// +/// Moreover, in bag2, node 7 can be moved to the front of node 5 with the `put_in_front_of`, as it +/// has a higher score. +/// +/// ```mermaid +/// graph LR +/// Bag0 --> Bag1 --> Bag2 +/// +/// subgraph Bag0[Bag 0: 21-30 DOT] +/// direction LR +/// 25 --> 21 --> 22X[22] +/// end +/// +/// subgraph Bag1[Bag 1: 11-20 DOT] +/// direction LR +/// 12 --> 22 +/// end +/// +/// subgraph Bag2[Bag 2: 0-10 DOT] +/// direction LR +/// 5 --> 7 --> 3 +/// end +/// +/// style 22X stroke-dasharray: 5 5,opacity:50% +/// ``` +/// +/// The equivalent of this in code would be: +#[doc = docify::embed!("src/tests.rs", examples_work)] +pub mod example {} + use codec::FullCodec; use frame_election_provider_support::{ScoreProvider, SortedListProvider}; use frame_system::ensure_signed; use sp_runtime::traits::{AtLeast32BitUnsigned, Bounded, StaticLookup}; use sp_std::prelude::*; +#[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] +use sp_runtime::TryRuntimeError; + #[cfg(any(feature = "runtime-benchmarks", test))] mod benchmarks; @@ -237,9 +309,11 @@ pub mod pallet { /// Move the caller's Id directly in front of `lighter`. 
/// /// The dispatch origin for this call must be _Signed_ and can only be called by the Id of - /// the account going in front of `lighter`. + /// the account going in front of `lighter`. Fee is payed by the origin under all + /// circumstances. + /// + /// Only works if: /// - /// Only works if /// - both nodes are within the same bag, /// - and `origin` has a greater `Score` than `lighter`. #[pallet::call_index(1)] @@ -254,6 +328,24 @@ pub mod pallet { .map_err::, _>(Into::into) .map_err::(Into::into) } + + /// Same as [`Pallet::put_in_front_of`], but it can be called by anyone. + /// + /// Fee is paid by the origin under all circumstances. + #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::put_in_front_of())] + pub fn put_in_front_of_other( + origin: OriginFor, + heavier: AccountIdLookupOf, + lighter: AccountIdLookupOf, + ) -> DispatchResult { + let _ = ensure_signed(origin)?; + let lighter = T::Lookup::lookup(lighter)?; + let heavier = T::Lookup::lookup(heavier)?; + List::::put_in_front_of(&lighter, &heavier) + .map_err::, _>(Into::into) + .map_err::(Into::into) + } } #[pallet::hooks] @@ -267,7 +359,7 @@ pub mod pallet { } #[cfg(feature = "try-runtime")] - fn try_state(_: BlockNumberFor) -> Result<(), &'static str> { + fn try_state(_: BlockNumberFor) -> Result<(), TryRuntimeError> { >::try_state() } } @@ -275,7 +367,7 @@ pub mod pallet { #[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] impl, I: 'static> Pallet { - pub fn do_try_state() -> Result<(), &'static str> { + pub fn do_try_state() -> Result<(), TryRuntimeError> { List::::do_try_state() } } @@ -355,7 +447,7 @@ impl, I: 'static> SortedListProvider for Pallet } #[cfg(feature = "try-runtime")] - fn try_state() -> Result<(), &'static str> { + fn try_state() -> Result<(), TryRuntimeError> { Self::do_try_state() } diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs index f667f4c101ef8..d8626080e2523 100644 --- a/frame/bags-list/src/list/mod.rs +++ 
b/frame/bags-list/src/list/mod.rs @@ -42,6 +42,9 @@ use sp_std::{ prelude::*, }; +#[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] +use sp_runtime::TryRuntimeError; + #[derive(Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo, PalletError)] pub enum ListError { /// A duplicate id has been detected. @@ -512,11 +515,11 @@ impl, I: 'static> List { /// * and sanity-checks all bags and nodes. This will cascade down all the checks and makes sure /// all bags and nodes are checked per *any* update to `List`. #[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] - pub(crate) fn do_try_state() -> Result<(), &'static str> { + pub(crate) fn do_try_state() -> Result<(), TryRuntimeError> { let mut seen_in_list = BTreeSet::new(); ensure!( Self::iter().map(|node| node.id).all(|id| seen_in_list.insert(id)), - "duplicate identified", + "duplicate identified" ); let iter_count = Self::iter().count() as u32; @@ -750,7 +753,7 @@ impl, I: 'static> Bag { /// * Ensures tail has no next. /// * Ensures there are no loops, traversal from head to tail is correct. 
#[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] - fn do_try_state(&self) -> Result<(), &'static str> { + fn do_try_state(&self) -> Result<(), TryRuntimeError> { frame_support::ensure!( self.head() .map(|head| head.prev().is_none()) @@ -895,15 +898,12 @@ impl, I: 'static> Node { } #[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] - fn do_try_state(&self) -> Result<(), &'static str> { + fn do_try_state(&self) -> Result<(), TryRuntimeError> { let expected_bag = Bag::::get(self.bag_upper).ok_or("bag not found for node")?; let id = self.id(); - frame_support::ensure!( - expected_bag.contains(id), - "node does not exist in the expected bag" - ); + frame_support::ensure!(expected_bag.contains(id), "node does not exist in the bag"); let non_terminal_check = !self.is_terminal() && expected_bag.head.as_ref() != Some(id) && diff --git a/frame/bags-list/src/list/tests.rs b/frame/bags-list/src/list/tests.rs index f5afdc24f2608..fd4ad8f893af3 100644 --- a/frame/bags-list/src/list/tests.rs +++ b/frame/bags-list/src/list/tests.rs @@ -22,6 +22,7 @@ use crate::{ }; use frame_election_provider_support::{SortedListProvider, VoteWeight}; use frame_support::{assert_ok, assert_storage_noop}; +use sp_runtime::TryRuntimeError; fn node( id: AccountId, @@ -359,7 +360,10 @@ mod list { // make sure there are no duplicates. ExtBuilder::default().build_and_execute_no_post_check(|| { Bag::::get(10).unwrap().insert_unchecked(2, 10); - assert_eq!(List::::do_try_state(), Err("duplicate identified")); + assert_eq!( + List::::do_try_state(), + TryRuntimeError::Other("duplicate identified").into() + ); }); // ensure count is in sync with `ListNodes::count()`. 
@@ -373,7 +377,10 @@ mod list { CounterForListNodes::::mutate(|counter| *counter += 1); assert_eq!(crate::ListNodes::::count(), 5); - assert_eq!(List::::do_try_state(), Err("iter_count != stored_count")); + assert_eq!( + List::::do_try_state(), + TryRuntimeError::Other("iter_count != stored_count").into() + ); }); } diff --git a/frame/bags-list/src/migrations.rs b/frame/bags-list/src/migrations.rs index 5f9bb8f73ac60..7df63a6a44c54 100644 --- a/frame/bags-list/src/migrations.rs +++ b/frame/bags-list/src/migrations.rs @@ -24,6 +24,9 @@ use frame_support::traits::OnRuntimeUpgrade; #[cfg(feature = "try-runtime")] use frame_support::ensure; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + #[cfg(feature = "try-runtime")] use sp_std::vec::Vec; @@ -35,7 +38,7 @@ impl, I: 'static> OnRuntimeUpgrade for CheckCounterPrefix Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { // The old explicit storage item. #[frame_support::storage_alias] type CounterForListNodes, I: 'static> = @@ -88,7 +91,7 @@ mod old { pub struct AddScore, I: 'static = ()>(sp_std::marker::PhantomData<(T, I)>); impl, I: 'static> OnRuntimeUpgrade for AddScore { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { // The list node data should be corrupt at this point, so this is zero. ensure!(crate::ListNodes::::iter().count() == 0, "list node data is not corrupt"); // We can use the helper `old::ListNode` to get the existing data. @@ -119,7 +122,7 @@ impl, I: 'static> OnRuntimeUpgrade for AddScore { } #[cfg(feature = "try-runtime")] - fn post_upgrade(node_count_before: Vec) -> Result<(), &'static str> { + fn post_upgrade(node_count_before: Vec) -> Result<(), TryRuntimeError> { let node_count_before: u32 = Decode::decode(&mut node_count_before.as_slice()) .expect("the state parameter should be something that was generated by pre_upgrade"); // Now the list node data is not corrupt anymore. 
diff --git a/frame/bags-list/src/mock.rs b/frame/bags-list/src/mock.rs index efbb2ed94c49f..ae50adabd508a 100644 --- a/frame/bags-list/src/mock.rs +++ b/frame/bags-list/src/mock.rs @@ -21,6 +21,7 @@ use super::*; use crate::{self as bags_list}; use frame_election_provider_support::VoteWeight; use frame_support::parameter_types; +use sp_runtime::BuildStorage; use std::collections::HashMap; pub type AccountId = u32; @@ -51,14 +52,13 @@ impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = sp_runtime::testing::Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type DbWeight = (); @@ -86,15 +86,11 @@ impl bags_list::Config for Runtime { type Score = VoteWeight; } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub struct Runtime { - System: frame_system::{Pallet, Call, Storage, Event, Config}, + System: frame_system::{Pallet, Call, Storage, Event, Config}, BagsList: bags_list::{Pallet, Call, Storage, Event}, } ); @@ -127,7 +123,7 @@ impl ExtBuilder { pub(crate) fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); - let storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); let ids_with_weight: Vec<_> = if self.skip_genesis_ids { self.ids.iter().collect() diff --git a/frame/bags-list/src/tests.rs b/frame/bags-list/src/tests.rs 
index 74f1491835a32..9e8508698d8e8 100644 --- a/frame/bags-list/src/tests.rs +++ b/frame/bags-list/src/tests.rs @@ -22,6 +22,62 @@ use frame_election_provider_support::{SortedListProvider, VoteWeight}; use list::Bag; use mock::{test_utils::*, *}; +#[docify::export] +#[test] +fn examples_work() { + ExtBuilder::default() + .skip_genesis_ids() + // initially set the score of 11 for 22 to push it next to 12 + .add_ids(vec![(25, 25), (21, 21), (12, 12), (22, 11), (5, 5), (7, 7), (3, 3)]) + .build_and_execute(|| { + // initial bags + assert_eq!( + List::::get_bags(), + vec![ + // bag 0 -> 10 + (10, vec![5, 7, 3]), + // bag 10 -> 20 + (20, vec![12, 22]), + // bag 20 -> 30 + (30, vec![25, 21]) + ] + ); + + // set score of 22 to 22 + StakingMock::set_score_of(&22, 22); + + // now we rebag 22 to the first bag + assert_ok!(BagsList::rebag(RuntimeOrigin::signed(42), 22)); + + assert_eq!( + List::::get_bags(), + vec![ + // bag 0 -> 10 + (10, vec![5, 7, 3]), + // bag 10 -> 20 + (20, vec![12]), + // bag 20 -> 30 + (30, vec![25, 21, 22]) + ] + ); + + // now we put 7 at the front of bag 0 + assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(7), 5)); + + assert_eq!( + List::::get_bags(), + vec![ + // bag 0 -> 10 + (10, vec![7, 5, 3]), + // bag 10 -> 20 + (20, vec![12]), + // bag 20 -> 30 + (30, vec![25, 21, 22]) + ] + ); + }) +} + mod pallet { use super::*; @@ -207,6 +263,25 @@ mod pallet { }) } + #[test] + fn put_in_front_of_other_can_be_permissionless() { + ExtBuilder::default() + .skip_genesis_ids() + .add_ids(vec![(10, 15), (11, 16), (12, 19)]) + .build_and_execute(|| { + // given + assert_eq!(List::::get_bags(), vec![(20, vec![10, 11, 12])]); + // 11 now has more weight than 10 and can be moved before it. 
+ StakingMock::set_score_of(&11u32, 17); + + // when + assert_ok!(BagsList::put_in_front_of_other(RuntimeOrigin::signed(42), 11u32, 10)); + + // then + assert_eq!(List::::get_bags(), vec![(20, vec![11, 10, 12])]); + }); + } + #[test] fn put_in_front_of_two_node_bag_heavier_is_tail() { ExtBuilder::default() @@ -368,7 +443,7 @@ mod pallet { StakingMock::set_score_of(&4, 999); // when - BagsList::put_in_front_of(RuntimeOrigin::signed(2), 4).unwrap(); + assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(2), 4)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![3, 2, 4])]); diff --git a/frame/bags-list/src/weights.rs b/frame/bags-list/src/weights.rs index f2b65beba2c80..d929c6bb95963 100644 --- a/frame/bags-list/src/weights.rs +++ b/frame/bags-list/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_bags_list //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_bags_list +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_bags_list. 
pub trait WeightInfo { @@ -68,8 +72,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1724` // Estimated: `11506` - // Minimum execution time: 63_335_000 picoseconds. - Weight::from_parts(64_097_000, 11506) + // Minimum execution time: 62_137_000 picoseconds. + Weight::from_parts(64_050_000, 11506) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -85,8 +89,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1618` // Estimated: `8877` - // Minimum execution time: 62_151_000 picoseconds. - Weight::from_parts(62_827_000, 8877) + // Minimum execution time: 60_880_000 picoseconds. + Weight::from_parts(62_078_000, 8877) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -104,8 +108,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1930` // Estimated: `11506` - // Minimum execution time: 69_179_000 picoseconds. - Weight::from_parts(69_898_000, 11506) + // Minimum execution time: 68_911_000 picoseconds. + Weight::from_parts(70_592_000, 11506) .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -125,8 +129,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1724` // Estimated: `11506` - // Minimum execution time: 63_335_000 picoseconds. - Weight::from_parts(64_097_000, 11506) + // Minimum execution time: 62_137_000 picoseconds. + Weight::from_parts(64_050_000, 11506) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -142,8 +146,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1618` // Estimated: `8877` - // Minimum execution time: 62_151_000 picoseconds. - Weight::from_parts(62_827_000, 8877) + // Minimum execution time: 60_880_000 picoseconds. 
+ Weight::from_parts(62_078_000, 8877) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -161,8 +165,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1930` // Estimated: `11506` - // Minimum execution time: 69_179_000 picoseconds. - Weight::from_parts(69_898_000, 11506) + // Minimum execution time: 68_911_000 picoseconds. + Weight::from_parts(70_592_000, 11506) .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index 53ea04fc12ea3..549ed3936a9e1 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -13,19 +13,19 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-transaction-payment = { version = "4.0.0-dev", path = "../transaction-payment" } 
-sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } paste = "1.0.12" [features] @@ -39,8 +39,21 @@ std = [ "scale-info/std", "sp-runtime/std", "sp-std/std", + "pallet-transaction-payment/std", + "sp-core/std", + "sp-io/std" ] # Enable support for setting the existential deposit to zero. insecure_zero_ed = [] -runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] -try-runtime = ["frame-support/try-runtime"] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-transaction-payment/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/balances/README.md b/frame/balances/README.md index dd56ab3fadfb5..fa1ee622d48ce 100644 --- a/frame/balances/README.md +++ b/frame/balances/README.md @@ -94,7 +94,7 @@ The Staking module uses the `LockableCurrency` trait to lock a stash account's f use frame_support::traits::{WithdrawReasons, LockableCurrency}; use sp_runtime::traits::Bounded; pub trait Config: frame_system::Config { - type Currency: LockableCurrency; + type Currency: LockableCurrency>; } fn update_ledger( diff --git a/frame/balances/src/impl_currency.rs b/frame/balances/src/impl_currency.rs index 9f764a37b8b89..2cbe776c51297 100644 --- a/frame/balances/src/impl_currency.rs +++ b/frame/balances/src/impl_currency.rs @@ -32,6 +32,7 @@ use frame_support::{ ReservableCurrency, SignedImbalance, TryDrop, WithdrawReasons, }, }; +use frame_system::pallet_prelude::BlockNumberFor; pub use imbalances::{NegativeImbalance, PositiveImbalance}; // wrapping these imbalances in a private module is necessary to ensure absolute privacy @@ -842,21 +843,22 @@ impl, 
I: 'static> LockableCurrency for Pallet where T::Balance: MaybeSerializeDeserialize + Debug, { - type Moment = T::BlockNumber; + type Moment = BlockNumberFor; type MaxLocks = T::MaxLocks; - // Set a lock on the balance of `who`. - // Is a no-op if lock amount is zero or `reasons` `is_none()`. + // Set or alter a lock on the balance of `who`. fn set_lock( id: LockIdentifier, who: &T::AccountId, amount: T::Balance, reasons: WithdrawReasons, ) { - if amount.is_zero() || reasons.is_empty() { + if reasons.is_empty() || amount.is_zero() { + Self::remove_lock(id, who); return } + let mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); let mut locks = Self::locks(who) .into_iter() diff --git a/frame/balances/src/impl_fungible.rs b/frame/balances/src/impl_fungible.rs index f8f8fe17ae0ef..03c40bb3a8401 100644 --- a/frame/balances/src/impl_fungible.rs +++ b/frame/balances/src/impl_fungible.rs @@ -207,7 +207,7 @@ impl, I: 'static> fungible::Mutate for Pallet { impl, I: 'static> fungible::MutateHold for Pallet {} impl, I: 'static> fungible::InspectHold for Pallet { - type Reason = T::HoldIdentifier; + type Reason = T::RuntimeHoldReason; fn total_balance_on_hold(who: &T::AccountId) -> T::Balance { Self::account(who).reserved diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 7ed725a77df0c..90bee7d99e121 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -120,7 +120,7 @@ //! use frame_support::traits::{WithdrawReasons, LockableCurrency}; //! use sp_runtime::traits::Bounded; //! pub trait Config: frame_system::Config { -//! type Currency: LockableCurrency; +//! type Currency: LockableCurrency>; //! } //! # struct StakingLedger { //! 
# stash: ::AccountId, @@ -166,8 +166,6 @@ mod types; pub mod weights; use codec::{Codec, MaxEncodedLen}; -#[cfg(feature = "std")] -use frame_support::traits::GenesisBuild; use frame_support::{ ensure, pallet_prelude::DispatchResult, @@ -193,7 +191,9 @@ use sp_runtime::{ ArithmeticError, DispatchError, FixedPointOperand, Perbill, RuntimeDebug, TokenError, }; use sp_std::{cmp, fmt::Debug, mem, prelude::*, result}; -pub use types::{AccountData, BalanceLock, DustCleaner, IdAmount, Reasons, ReserveData}; +pub use types::{ + AccountData, BalanceLock, DustCleaner, ExtraFlags, IdAmount, Reasons, ReserveData, +}; pub use weights::WeightInfo; pub use pallet::*; @@ -213,7 +213,33 @@ pub mod pallet { pub type CreditOf = Credit<::AccountId, Pallet>; - #[pallet::config] + /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. + pub mod config_preludes { + use super::*; + use frame_support::derive_impl; + + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + type Balance = u64; + + type ReserveIdentifier = (); + type FreezeIdentifier = (); + + type MaxLocks = (); + type MaxReserves = (); + type MaxFreezes = (); + type MaxHolds = (); + + type WeightInfo = (); + } + } + + #[pallet::config(with_default)] pub trait Config: frame_system::Config { /// The overarching event type. type RuntimeEvent: From> @@ -236,6 +262,7 @@ pub mod pallet { + FixedPointOperand; /// Handler for the unbalanced reduction when removing a dust account. + #[pallet::no_default] type DustRemoval: OnUnbalanced>; /// The minimum amount required to keep an account open. MUST BE GREATER THAN ZERO! @@ -247,9 +274,11 @@ pub mod pallet { /// /// Bottom line: Do yourself a favour and make it at least one! 
#[pallet::constant] + #[pallet::no_default] type ExistentialDeposit: Get; /// The means of storing the balances of an account. + #[pallet::no_default] type AccountStore: StoredMap>; /// The ID type for reserves. @@ -257,8 +286,9 @@ pub mod pallet { /// Use of reserves is deprecated in favour of holds. See `https://github.com/paritytech/substrate/pull/12951/` type ReserveIdentifier: Parameter + Member + MaxEncodedLen + Ord + Copy; - /// The ID type for holds. - type HoldIdentifier: Parameter + Member + MaxEncodedLen + Ord + Copy; + /// The overarching hold reason. + #[pallet::no_default] + type RuntimeHoldReason: Parameter + Member + MaxEncodedLen + Ord + Copy; /// The ID type for freezes. type FreezeIdentifier: Parameter + Member + MaxEncodedLen + Ord + Copy; @@ -437,7 +467,7 @@ pub mod pallet { _, Blake2_128Concat, T::AccountId, - BoundedVec, T::MaxHolds>, + BoundedVec, T::MaxHolds>, ValueQuery, >; @@ -463,7 +493,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { + impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) { let total = self.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n); @@ -497,25 +527,8 @@ pub mod pallet { } } - #[cfg(feature = "std")] - impl, I: 'static> GenesisConfig { - /// Direct implementation of `GenesisBuild::build_storage`. - /// - /// Kept in order not to break dependency. - pub fn build_storage(&self) -> Result { - >::build_storage(self) - } - - /// Direct implementation of `GenesisBuild::assimilate_storage`. - /// - /// Kept in order not to break dependency. 
- pub fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> { - >::assimilate_storage(self, storage) - } - } - #[pallet::hooks] - impl, I: 'static> Hooks for Pallet { + impl, I: 'static> Hooks> for Pallet { #[cfg(not(feature = "insecure_zero_ed"))] fn integrity_test() { assert!( @@ -539,11 +552,11 @@ pub mod pallet { origin: OriginFor, dest: AccountIdLookupOf, #[pallet::compact] value: T::Balance, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { let source = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; >::transfer(&source, &dest, value, Expendable)?; - Ok(().into()) + Ok(()) } /// Set the regular balance of a given account; it also takes a reserved balance but this @@ -562,7 +575,7 @@ pub mod pallet { who: AccountIdLookupOf, #[pallet::compact] new_free: T::Balance, #[pallet::compact] old_reserved: T::Balance, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { ensure_root(origin)?; let who = T::Lookup::lookup(who)?; let existential_deposit = Self::ed(); @@ -590,7 +603,7 @@ pub mod pallet { } Self::deposit_event(Event::BalanceSet { who, free: new_free }); - Ok(().into()) + Ok(()) } /// Exactly as `transfer_allow_death`, except the origin must be root and the source account @@ -601,12 +614,12 @@ pub mod pallet { source: AccountIdLookupOf, dest: AccountIdLookupOf, #[pallet::compact] value: T::Balance, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { ensure_root(origin)?; let source = T::Lookup::lookup(source)?; let dest = T::Lookup::lookup(dest)?; >::transfer(&source, &dest, value, Expendable)?; - Ok(().into()) + Ok(()) } /// Same as the [`transfer_allow_death`] call, but with a check that the transfer will not @@ -620,11 +633,11 @@ pub mod pallet { origin: OriginFor, dest: AccountIdLookupOf, #[pallet::compact] value: T::Balance, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { let source = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; >::transfer(&source, &dest, 
value, Preserve)?; - Ok(().into()) + Ok(()) } /// Transfer the entire transferable balance from the caller account. @@ -722,11 +735,11 @@ pub mod pallet { origin: OriginFor, dest: AccountIdLookupOf, #[pallet::compact] value: T::Balance, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { let source = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; >::transfer(&source, &dest, value, Expendable)?; - Ok(().into()) + Ok(()) } /// Set the regular balance of a given account. @@ -741,7 +754,7 @@ pub mod pallet { origin: OriginFor, who: AccountIdLookupOf, #[pallet::compact] new_free: T::Balance, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { ensure_root(origin)?; let who = T::Lookup::lookup(who)?; let existential_deposit = Self::ed(); @@ -765,7 +778,7 @@ pub mod pallet { } Self::deposit_event(Event::BalanceSet { who, free: new_free }); - Ok(().into()) + Ok(()) } } diff --git a/frame/balances/src/tests/currency_tests.rs b/frame/balances/src/tests/currency_tests.rs index e25b122c1fcf0..c9ad19f79e36d 100644 --- a/frame/balances/src/tests/currency_tests.rs +++ b/frame/balances/src/tests/currency_tests.rs @@ -33,6 +33,30 @@ const ID_2: LockIdentifier = *b"2 "; pub const CALL: &::RuntimeCall = &RuntimeCall::Balances(crate::Call::transfer_allow_death { dest: 0, value: 0 }); +#[test] +fn set_lock_with_amount_zero_removes_lock() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); + Balances::set_lock(ID_1, &1, 0, WithdrawReasons::all()); + assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); + }); +} + +#[test] +fn set_lock_with_withdraw_reasons_empty_removes_lock() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); + Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::empty()); + assert_ok!(>::transfer(&1, &2, 1, 
AllowDeath)); + }); +} + #[test] fn basic_locking_should_work() { ExtBuilder::default() @@ -642,7 +666,7 @@ fn burn_must_work() { #[should_panic = "the balance of any account should always be at least the existential deposit."] fn cannot_set_genesis_value_below_ed() { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = 11); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let _ = crate::GenesisConfig:: { balances: vec![(1, 10)] } .assimilate_storage(&mut t) .unwrap(); @@ -651,7 +675,7 @@ fn cannot_set_genesis_value_below_ed() { #[test] #[should_panic = "duplicate balances in genesis."] fn cannot_set_genesis_value_twice() { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let _ = crate::GenesisConfig:: { balances: vec![(1, 10), (2, 20), (1, 15)] } .assimilate_storage(&mut t) .unwrap(); diff --git a/frame/balances/src/tests/mod.rs b/frame/balances/src/tests/mod.rs index 4731dbf8ed32b..45f34110a6e9e 100644 --- a/frame/balances/src/tests/mod.rs +++ b/frame/balances/src/tests/mod.rs @@ -38,9 +38,8 @@ use scale_info::TypeInfo; use sp_core::{hexdisplay::HexDisplay, H256}; use sp_io; use sp_runtime::{ - testing::Header, traits::{BadOrigin, IdentityLookup, SignedExtension, Zero}, - ArithmeticError, DispatchError, DispatchResult, FixedPointNumber, TokenError, + ArithmeticError, BuildStorage, DispatchError, DispatchResult, FixedPointNumber, TokenError, }; use std::collections::BTreeSet; @@ -50,7 +49,6 @@ mod fungible_conformance_tests; mod fungible_tests; mod reentrancy_tests; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; #[derive( @@ -73,12 +71,9 @@ pub enum TestId { } frame_support::construct_runtime!( - pub struct Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic 
= UncheckedExtrinsic, + pub struct Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, } @@ -97,14 +92,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -137,7 +131,7 @@ impl Config for Test { type MaxReserves = ConstU32<2>; type ReserveIdentifier = TestId; type WeightInfo = (); - type HoldIdentifier = TestId; + type RuntimeHoldReason = TestId; type FreezeIdentifier = TestId; type MaxFreezes = ConstU32<2>; type MaxHolds = ConstU32<2>; @@ -176,7 +170,7 @@ impl ExtBuilder { } pub fn build(self) -> sp_io::TestExternalities { self.set_associated_consts(); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: if self.monied { vec![ diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index f35d9c697028b..5671374948cdf 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -18,33 +18,34 @@ //! Autogenerated weights for pallet_balances //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-07-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-o7yfgx5n-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// target/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_balances // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/balances/src/weights.rs +// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json +// --pallet=pallet_balances +// --chain=dev // --header=./HEADER-APACHE2 +// --output=./frame/balances/src/weights.rs // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_balances. pub trait WeightInfo { @@ -61,190 +62,190 @@ pub trait WeightInfo { /// Weights for pallet_balances using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 59_458_000 picoseconds. - Weight::from_parts(60_307_000, 3593) + // Minimum execution time: 58_474_000 picoseconds. 
+ Weight::from_parts(59_117_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 43_056_000 picoseconds. - Weight::from_parts(43_933_000, 3593) + // Minimum execution time: 44_629_000 picoseconds. + Weight::from_parts(45_798_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 17_428_000 picoseconds. - Weight::from_parts(17_731_000, 3593) + // Minimum execution time: 16_483_000 picoseconds. + Weight::from_parts(16_939_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 22_809_000 picoseconds. 
- Weight::from_parts(23_225_000, 3593) + // Minimum execution time: 24_638_000 picoseconds. + Weight::from_parts(25_487_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6196` - // Minimum execution time: 56_929_000 picoseconds. - Weight::from_parts(57_688_000, 6196) + // Minimum execution time: 60_041_000 picoseconds. + Weight::from_parts(63_365_000, 6196) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_all() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 49_820_000 picoseconds. - Weight::from_parts(50_832_000, 3593) + // Minimum execution time: 54_445_000 picoseconds. 
+ Weight::from_parts(55_623_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_unreserve() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 20_270_000 picoseconds. - Weight::from_parts(20_597_000, 3593) + // Minimum execution time: 19_309_000 picoseconds. + Weight::from_parts(19_953_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:999 w:999) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:999 w:999) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `u` is `[1, 1000]`. fn upgrade_accounts(u: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 19_847_000 picoseconds. - Weight::from_parts(20_053_000, 990) - // Standard Error: 11_643 - .saturating_add(Weight::from_parts(14_563_782, 0).saturating_mul(u.into())) + // Minimum execution time: 19_362_000 picoseconds. 
+ Weight::from_parts(19_612_000, 990) + // Standard Error: 13_108 + .saturating_add(Weight::from_parts(16_444_591, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 59_458_000 picoseconds. - Weight::from_parts(60_307_000, 3593) + // Minimum execution time: 58_474_000 picoseconds. + Weight::from_parts(59_117_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 43_056_000 picoseconds. - Weight::from_parts(43_933_000, 3593) + // Minimum execution time: 44_629_000 picoseconds. 
+ Weight::from_parts(45_798_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 17_428_000 picoseconds. - Weight::from_parts(17_731_000, 3593) + // Minimum execution time: 16_483_000 picoseconds. + Weight::from_parts(16_939_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 22_809_000 picoseconds. - Weight::from_parts(23_225_000, 3593) + // Minimum execution time: 24_638_000 picoseconds. + Weight::from_parts(25_487_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6196` - // Minimum execution time: 56_929_000 picoseconds. 
- Weight::from_parts(57_688_000, 6196) + // Minimum execution time: 60_041_000 picoseconds. + Weight::from_parts(63_365_000, 6196) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_all() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 49_820_000 picoseconds. - Weight::from_parts(50_832_000, 3593) + // Minimum execution time: 54_445_000 picoseconds. + Weight::from_parts(55_623_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_unreserve() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 20_270_000 picoseconds. - Weight::from_parts(20_597_000, 3593) + // Minimum execution time: 19_309_000 picoseconds. + Weight::from_parts(19_953_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:999 w:999) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:999 w:999) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `u` is `[1, 1000]`. 
fn upgrade_accounts(u: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 19_847_000 picoseconds. - Weight::from_parts(20_053_000, 990) - // Standard Error: 11_643 - .saturating_add(Weight::from_parts(14_563_782, 0).saturating_mul(u.into())) + // Minimum execution time: 19_362_000 picoseconds. + Weight::from_parts(19_612_000, 990) + // Standard Error: 13_108 + .saturating_add(Weight::from_parts(16_444_591, 0).saturating_mul(u.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) diff --git a/frame/beefy-mmr/Cargo.toml b/frame/beefy-mmr/Cargo.toml index 9840162149a38..5691a5d735272 100644 --- a/frame/beefy-mmr/Cargo.toml +++ b/frame/beefy-mmr/Cargo.toml @@ -9,11 +9,11 @@ repository = "https://github.com/paritytech/substrate" homepage = "https://substrate.io" [dependencies] -array-bytes = { version = "4.1", optional = true } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +array-bytes = { version = "6.1", optional = true } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", optional = true } +serde = { version = "1.0.163", optional = true } binary-merkle-tree = { version = "4.0.0-dev", default-features = false, path = "../../utils/binary-merkle-tree" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } @@ -21,14 +21,14 @@ pallet-beefy = { version = "4.0.0-dev", default-features = 
false, path = "../bee pallet-mmr = { version = "4.0.0-dev", default-features = false, path = "../merkle-mountain-range" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../session" } sp-consensus-beefy = { version = "4.0.0-dev", default-features = false, path = "../../primitives/consensus/beefy" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } [dev-dependencies] -array-bytes = "4.1" +array-bytes = "6.1" sp-staking = { version = "4.0.0-dev", path = "../../primitives/staking" } [features] @@ -51,5 +51,13 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-api/std", + "sp-staking/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-beefy/try-runtime", + "pallet-mmr/try-runtime", + "pallet-session/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/beefy-mmr/src/lib.rs b/frame/beefy-mmr/src/lib.rs index b9d4bdfd8eb59..b12eb95f650f7 100644 --- a/frame/beefy-mmr/src/lib.rs +++ b/frame/beefy-mmr/src/lib.rs @@ -43,6 +43,7 @@ use sp_consensus_beefy::{ }; use frame_support::{crypto::ecdsa::ECDSAExt, traits::Get}; +use frame_system::pallet_prelude::BlockNumberFor; pub use pallet::*; @@ -56,10 
+57,10 @@ pub struct DepositBeefyDigest(sp_std::marker::PhantomData); impl pallet_mmr::primitives::OnNewRoot for DepositBeefyDigest where - T: pallet_mmr::Config, + T: pallet_mmr::Config, T: pallet_beefy::Config, { - fn on_new_root(root: &::Hash) { + fn on_new_root(root: &sp_consensus_beefy::MmrRootHash) { let digest = sp_runtime::generic::DigestItem::Consensus( sp_consensus_beefy::BEEFY_ENGINE_ID, codec::Encode::encode(&sp_consensus_beefy::ConsensusLog::< @@ -72,8 +73,8 @@ where /// Convert BEEFY secp256k1 public keys into Ethereum addresses pub struct BeefyEcdsaToEthereum; -impl Convert> for BeefyEcdsaToEthereum { - fn convert(beefy_id: sp_consensus_beefy::crypto::AuthorityId) -> Vec { +impl Convert> for BeefyEcdsaToEthereum { + fn convert(beefy_id: sp_consensus_beefy::ecdsa_crypto::AuthorityId) -> Vec { sp_core::ecdsa::Public::from(beefy_id) .to_eth_address() .map(|v| v.to_vec()) @@ -84,7 +85,7 @@ impl Convert> for BeefyEcdsaToE } } -type MerkleRootOf = ::Hash; +type MerkleRootOf = <::Hashing as sp_runtime::traits::Hash>::Output; #[frame_support::pallet] pub mod pallet { @@ -139,7 +140,7 @@ pub mod pallet { impl LeafDataProvider for Pallet { type LeafData = MmrLeaf< - ::BlockNumber, + BlockNumberFor, ::Hash, MerkleRootOf, T::LeafExtra, @@ -199,11 +200,12 @@ impl Pallet { .map(T::BeefyAuthorityToMerkleLeaf::convert) .collect::>(); let len = beefy_addresses.len() as u32; - let root = binary_merkle_tree::merkle_root::<::Hashing, _>( - beefy_addresses, - ) + let keyset_commitment = binary_merkle_tree::merkle_root::< + ::Hashing, + _, + >(beefy_addresses) .into(); - BeefyAuthoritySet { id, len, root } + BeefyAuthoritySet { id, len, keyset_commitment } } } diff --git a/frame/beefy-mmr/src/mock.rs b/frame/beefy-mmr/src/mock.rs index 8b3bedcb960b4..2c37ad4483f6b 100644 --- a/frame/beefy-mmr/src/mock.rs +++ b/frame/beefy-mmr/src/mock.rs @@ -21,22 +21,22 @@ use codec::Encode; use frame_support::{ construct_runtime, parameter_types, sp_io::TestExternalities, - 
traits::{ConstU16, ConstU32, ConstU64, GenesisBuild}, + traits::{ConstU16, ConstU32, ConstU64}, BasicExternalities, }; use sp_consensus_beefy::mmr::MmrLeafVersion; -use sp_core::{Hasher, H256}; +use sp_core::H256; use sp_runtime::{ app_crypto::ecdsa::Public, impl_opaque_keys, - testing::Header, traits::{BlakeTwo256, ConvertInto, IdentityLookup, Keccak256, OpaqueKeys}, + BuildStorage, }; use crate as pallet_beefy_mmr; pub use sp_consensus_beefy::{ - crypto::AuthorityId as BeefyId, mmr::BeefyDataProvider, ConsensusLog, BEEFY_ENGINE_ID, + ecdsa_crypto::AuthorityId as BeefyId, mmr::BeefyDataProvider, ConsensusLog, BEEFY_ENGINE_ID, }; impl_opaque_keys! { @@ -45,16 +45,12 @@ impl_opaque_keys! { } } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Session: pallet_session::{Pallet, Call, Storage, Event, Config}, Mmr: pallet_mmr::{Pallet, Storage}, Beefy: pallet_beefy::{Pallet, Config, Storage}, @@ -68,14 +64,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -102,9 +97,9 @@ impl pallet_session::Config for Test { } pub type MmrLeaf = sp_consensus_beefy::mmr::MmrLeaf< - ::BlockNumber, + frame_system::pallet_prelude::BlockNumberFor, ::Hash, - ::Hash, + crate::MerkleRootOf, Vec, >; @@ -113,8 +108,6 @@ impl pallet_mmr::Config for Test { type 
Hashing = Keccak256; - type Hash = ::Out; - type LeafData = BeefyMmr; type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; @@ -125,6 +118,7 @@ impl pallet_mmr::Config for Test { impl pallet_beefy::Config for Test { type BeefyId = BeefyId; type MaxAuthorities = ConstU32<100>; + type MaxNominators = ConstU32<1000>; type MaxSetIdSessionEntries = ConstU64<100>; type OnNewValidatorSet = BeefyMmr; type WeightInfo = (); @@ -195,7 +189,7 @@ pub fn new_test_ext(ids: Vec) -> TestExternalities { } pub fn new_test_ext_raw_authorities(authorities: Vec<(u64, BeefyId)>) -> TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let session_keys: Vec<_> = authorities .iter() diff --git a/frame/beefy-mmr/src/tests.rs b/frame/beefy-mmr/src/tests.rs index dc2e46f31fe64..ec756f83dffa4 100644 --- a/frame/beefy-mmr/src/tests.rs +++ b/frame/beefy-mmr/src/tests.rs @@ -70,7 +70,7 @@ fn should_contain_mmr_digest() { ValidatorSet::new(vec![mock_beefy_id(1), mock_beefy_id(2)], 1).unwrap() )), beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( - "200e73880940ac0b66735ffb560fa0a3989292463d262deac6ad61e78a3e46a4" + "95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc" ))) ] ); @@ -85,13 +85,13 @@ fn should_contain_mmr_digest() { ValidatorSet::new(vec![mock_beefy_id(1), mock_beefy_id(2)], 1).unwrap() )), beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( - "200e73880940ac0b66735ffb560fa0a3989292463d262deac6ad61e78a3e46a4" + "95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc" ))), beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new(vec![mock_beefy_id(3), mock_beefy_id(4)], 2).unwrap() )), beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( - "ba37d8d5d195ac8caec391da35472f9ecf1116ff1642409148b62e08896d3884" + "a73271a0974f1e67d6e9b8dd58e506177a2e556519a330796721e98279a753e2" ))), ] ); @@ 
-119,12 +119,12 @@ fn should_contain_valid_leaf_data() { beefy_next_authority_set: BeefyNextAuthoritySet { id: 2, len: 2, - root: array_bytes::hex_n_into_unchecked( + keyset_commitment: array_bytes::hex_n_into_unchecked( "9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5" ) }, leaf_extra: array_bytes::hex2bytes_unchecked( - "5572d58c82bddf323f4fc7aecab8a8f0ad6ed2f06ab2bfb8ade36a77a45fcc68" + "55b8e9e1cc9f0db7776fac0ca66318ef8acfb8ec26db11e373120583e07ee648" ) } ); @@ -144,12 +144,12 @@ fn should_contain_valid_leaf_data() { beefy_next_authority_set: BeefyNextAuthoritySet { id: 3, len: 2, - root: array_bytes::hex_n_into_unchecked( + keyset_commitment: array_bytes::hex_n_into_unchecked( "9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5" ) }, leaf_extra: array_bytes::hex2bytes_unchecked( - "5572d58c82bddf323f4fc7aecab8a8f0ad6ed2f06ab2bfb8ade36a77a45fcc68" + "55b8e9e1cc9f0db7776fac0ca66318ef8acfb8ec26db11e373120583e07ee648" ) } ); @@ -164,15 +164,15 @@ fn should_update_authorities() { // check current authority set assert_eq!(0, auth_set.id); assert_eq!(2, auth_set.len); - let want = array_bytes::hex_n_into_unchecked::( + let want = array_bytes::hex_n_into_unchecked::<_, H256, 32>( "176e73f1bf656478b728e28dd1a7733c98621b8acf830bff585949763dca7a96", ); - assert_eq!(want, auth_set.root); + assert_eq!(want, auth_set.keyset_commitment); // next authority set should have same validators but different id assert_eq!(1, next_auth_set.id); assert_eq!(auth_set.len, next_auth_set.len); - assert_eq!(auth_set.root, next_auth_set.root); + assert_eq!(auth_set.keyset_commitment, next_auth_set.keyset_commitment); let announced_set = next_auth_set; init_block(1); @@ -184,11 +184,11 @@ fn should_update_authorities() { assert_eq!(1, auth_set.id); // check next auth set assert_eq!(2, next_auth_set.id); - let want = array_bytes::hex_n_into_unchecked::( + let want = array_bytes::hex_n_into_unchecked::<_, H256, 32>( 
"9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5", ); assert_eq!(2, next_auth_set.len); - assert_eq!(want, next_auth_set.root); + assert_eq!(want, next_auth_set.keyset_commitment); let announced_set = next_auth_set; init_block(2); @@ -200,10 +200,10 @@ fn should_update_authorities() { assert_eq!(2, auth_set.id); // check next auth set assert_eq!(3, next_auth_set.id); - let want = array_bytes::hex_n_into_unchecked::( + let want = array_bytes::hex_n_into_unchecked::<_, H256, 32>( "9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5", ); assert_eq!(2, next_auth_set.len); - assert_eq!(want, next_auth_set.root); + assert_eq!(want, next_auth_set.keyset_commitment); }); } diff --git a/frame/beefy/Cargo.toml b/frame/beefy/Cargo.toml index 21c96ff952731..9f331f74f778f 100644 --- a/frame/beefy/Cargo.toml +++ b/frame/beefy/Cargo.toml @@ -9,18 +9,18 @@ description = "BEEFY FRAME pallet" homepage = "https://substrate.io" [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", optional = true } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +serde = { version = "1.0.163", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-authorship = { version = "4.0.0-dev", default-features = false, path = "../authorship" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../session" } -sp-consensus-beefy = { version = "4.0.0-dev", default-features = false, path = "../../primitives/consensus/beefy" } -sp-runtime = { version = "7.0.0", 
default-features = false, path = "../../primitives/runtime" } +sp-consensus-beefy = { version = "4.0.0-dev", default-features = false, path = "../../primitives/consensus/beefy", features = ["serde"] } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime", features = ["serde"] } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } -sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking", features = ["serde"] } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } @@ -29,8 +29,8 @@ pallet-offences = { version = "4.0.0-dev", path = "../offences" } pallet-staking = { version = "4.0.0-dev", path = "../staking" } pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } sp-staking = { version = "4.0.0-dev", path = "../../primitives/staking" } [features] @@ -42,11 +42,29 @@ std = [ "pallet-authorship/std", "pallet-session/std", "scale-info/std", - "serde", + "serde/std", "sp-consensus-beefy/std", "sp-runtime/std", "sp-session/std", "sp-staking/std", "sp-std/std", + "frame-election-provider-support/std", + "pallet-balances/std", + "pallet-offences/std", + "pallet-staking/std", + "pallet-timestamp/std", + "sp-core/std", + "sp-io/std" +] +try-runtime = [ + 
"frame-support/try-runtime", + "frame-election-provider-support/try-runtime", + "frame-system/try-runtime", + "pallet-authorship/try-runtime", + "pallet-balances/try-runtime", + "pallet-offences/try-runtime", + "pallet-session/try-runtime", + "pallet-staking/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/beefy/src/default_weights.rs b/frame/beefy/src/default_weights.rs index b15f1c88f9611..091d58f47f978 100644 --- a/frame/beefy/src/default_weights.rs +++ b/frame/beefy/src/default_weights.rs @@ -24,14 +24,11 @@ use frame_support::weights::{ }; impl crate::WeightInfo for () { - fn report_equivocation(validator_count: u32) -> Weight { + fn report_equivocation(validator_count: u32, max_nominators_per_validator: u32) -> Weight { // we take the validator set count from the membership proof to // calculate the weight but we set a floor of 100 validators. let validator_count = validator_count.max(100) as u64; - // worst case we are considering is that the given offender is backed by 200 nominators - const MAX_NOMINATORS: u64 = 200; - // checking membership proof Weight::from_parts(35u64 * WEIGHT_REF_TIME_PER_MICROS, 0) .saturating_add( @@ -44,11 +41,11 @@ impl crate::WeightInfo for () { // report offence .saturating_add(Weight::from_parts(110u64 * WEIGHT_REF_TIME_PER_MICROS, 0)) .saturating_add(Weight::from_parts( - 25u64 * WEIGHT_REF_TIME_PER_MICROS * MAX_NOMINATORS, + 25u64 * WEIGHT_REF_TIME_PER_MICROS * max_nominators_per_validator as u64, 0, )) - .saturating_add(DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) - .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)) + .saturating_add(DbWeight::get().reads(14 + 3 * max_nominators_per_validator as u64)) + .saturating_add(DbWeight::get().writes(10 + 3 * max_nominators_per_validator as u64)) // fetching set id -> session index mappings .saturating_add(DbWeight::get().reads(2)) } diff --git 
a/frame/beefy/src/equivocation.rs b/frame/beefy/src/equivocation.rs index f83b037dcd26e..84c62fc47f50a 100644 --- a/frame/beefy/src/equivocation.rs +++ b/frame/beefy/src/equivocation.rs @@ -39,8 +39,9 @@ use frame_support::{ log, traits::{Get, KeyOwnerProofSystem}, }; +use frame_system::pallet_prelude::BlockNumberFor; use log::{error, info}; -use sp_consensus_beefy::{EquivocationProof, ValidatorSetId, KEY_TYPE}; +use sp_consensus_beefy::{EquivocationProof, ValidatorSetId, KEY_TYPE as BEEFY_KEY_TYPE}; use sp_runtime::{ transaction_validity::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, @@ -117,7 +118,7 @@ where /// /// This type implements `OffenceReportSystem` such that: /// - Equivocation reports are published on-chain as unsigned extrinsic via -/// `offchain::SendTransactioinsTypes`. +/// `offchain::SendTransactionTypes`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. /// - Offence reporter for unsigned transactions is fetched via the the authorship pallet. @@ -126,7 +127,7 @@ pub struct EquivocationReportSystem(sp_std::marker::PhantomData<(T, /// Equivocation evidence convenience alias. 
pub type EquivocationEvidenceFor = ( EquivocationProof< - ::BlockNumber, + BlockNumberFor, ::BeefyId, <::BeefyId as RuntimeAppPublic>::Signature, >, @@ -140,7 +141,7 @@ where R: ReportOffence< T::AccountId, P::IdentificationTuple, - EquivocationOffence, + EquivocationOffence>, >, P: KeyOwnerProofSystem<(KeyTypeId, T::BeefyId), Proof = T::KeyOwnerProof>, P::IdentificationTuple: Clone, @@ -171,7 +172,7 @@ where let (equivocation_proof, key_owner_proof) = evidence; // Check the membership proof to extract the offender's id - let key = (KEY_TYPE, equivocation_proof.offender_id().clone()); + let key = (BEEFY_KEY_TYPE, equivocation_proof.offender_id().clone()); let offender = P::check_proof(key, key_owner_proof).ok_or(InvalidTransaction::BadProof)?; // Check if the offence has already been reported, and if so then we can discard the report. @@ -205,7 +206,7 @@ where let validator_set_count = key_owner_proof.validator_count(); // Validate the key ownership proof extracting the id of the offender. - let offender = P::check_proof((KEY_TYPE, offender), key_owner_proof) + let offender = P::check_proof((BEEFY_KEY_TYPE, offender), key_owner_proof) .ok_or(Error::::InvalidKeyOwnershipProof)?; // Validate equivocation proof (check votes are different and signatures are valid). diff --git a/frame/beefy/src/lib.rs b/frame/beefy/src/lib.rs index 6a248da2bfe97..35d3273e1ef76 100644 --- a/frame/beefy/src/lib.rs +++ b/frame/beefy/src/lib.rs @@ -78,6 +78,10 @@ pub mod pallet { #[pallet::constant] type MaxAuthorities: Get; + /// The maximum number of nominators for each validator. + #[pallet::constant] + type MaxNominators: Get; + /// The maximum number of entries to keep in the set id to session index mapping. 
/// /// Since the `SetIdSession` map is only used for validating equivocations this @@ -112,7 +116,7 @@ pub mod pallet { } #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); /// The current authorities set #[pallet::storage] @@ -148,8 +152,8 @@ pub mod pallet { StorageMap<_, Twox64Concat, sp_consensus_beefy::ValidatorSetId, SessionIndex>; /// Block number where BEEFY consensus is enabled/started. - /// If changing this, make sure `Self::ValidatorSetId` is also reset to - /// `GENESIS_AUTHORITY_SET_ID` in the state of the new block number configured here. + /// By changing this (through governance or sudo), BEEFY consensus is effectively + /// restarted from the new block number. #[pallet::storage] #[pallet::getter(fn genesis_block)] pub(super) type GenesisBlock = @@ -176,7 +180,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { Pallet::::initialize(&self.authorities) // we panic here as runtime maintainers can simply reconfigure genesis and restart @@ -203,7 +207,10 @@ pub mod pallet { /// against the extracted offender. If both are valid, the offence /// will be reported. #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::report_equivocation(key_owner_proof.validator_count()))] + #[pallet::weight(T::WeightInfo::report_equivocation( + key_owner_proof.validator_count(), + T::MaxNominators::get(), + ))] pub fn report_equivocation( origin: OriginFor, equivocation_proof: Box< @@ -235,7 +242,10 @@ pub mod pallet { /// if the block author is defined it will be defined as the equivocation /// reporter. 
#[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::report_equivocation(key_owner_proof.validator_count()))] + #[pallet::weight(T::WeightInfo::report_equivocation( + key_owner_proof.validator_count(), + T::MaxNominators::get(), + ))] pub fn report_equivocation_unsigned( origin: OriginFor, equivocation_proof: Box< @@ -441,5 +451,5 @@ impl IsMember for Pallet { } pub trait WeightInfo { - fn report_equivocation(validator_count: u32) -> Weight; + fn report_equivocation(validator_count: u32, max_nominators_per_validator: u32) -> Weight; } diff --git a/frame/beefy/src/mock.rs b/frame/beefy/src/mock.rs index 6b6ffd6751fbe..f2d8415bc01f7 100644 --- a/frame/beefy/src/mock.rs +++ b/frame/beefy/src/mock.rs @@ -17,13 +17,14 @@ use std::vec; -use frame_election_provider_support::{onchain, SequentialPhragmen}; +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, SequentialPhragmen, +}; use frame_support::{ construct_runtime, parameter_types, sp_io::TestExternalities, - traits::{ - ConstU16, ConstU32, ConstU64, GenesisBuild, KeyOwnerProofSystem, OnFinalize, OnInitialize, - }, + traits::{ConstU16, ConstU32, ConstU64, KeyOwnerProofSystem, OnFinalize, OnInitialize}, BasicExternalities, }; use pallet_session::historical as pallet_session_historical; @@ -32,16 +33,16 @@ use sp_runtime::{ app_crypto::ecdsa::Public, curve::PiecewiseLinear, impl_opaque_keys, - testing::{Header, TestXt}, + testing::TestXt, traits::{BlakeTwo256, IdentityLookup, OpaqueKeys}, - Perbill, + BuildStorage, Perbill, }; use sp_staking::{EraIndex, SessionIndex}; use crate as pallet_beefy; pub use sp_consensus_beefy::{ - crypto::{AuthorityId as BeefyId, AuthoritySignature as BeefySignature}, + ecdsa_crypto::{AuthorityId as BeefyId, AuthoritySignature as BeefySignature}, ConsensusLog, EquivocationProof, BEEFY_ENGINE_ID, }; @@ -51,14 +52,10 @@ impl_opaque_keys! 
{ } } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, Authorship: pallet_authorship, @@ -78,14 +75,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -117,6 +113,7 @@ parameter_types! { impl pallet_beefy::Config for Test { type BeefyId = BeefyId; type MaxAuthorities = ConstU32<100>; + type MaxNominators = ConstU32<1000>; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; type OnNewValidatorSet = (); type WeightInfo = (); @@ -161,7 +158,7 @@ impl pallet_balances::Config for Test { type ExistentialDeposit = ConstU128<1>; type AccountStore = System; type WeightInfo = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); type FreezeIdentifier = (); type MaxFreezes = (); @@ -190,6 +187,7 @@ parameter_types! 
{ pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); + pub static ElectionsBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } pub struct OnChainSeqPhragmen; @@ -199,14 +197,12 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = (); type MaxWinners = ConstU32<100>; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; + type Bounds = ElectionsBoundsOnChain; } impl pallet_staking::Config for Test { - type MaxNominations = ConstU32<16>; type RewardRemainder = (); - type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type CurrencyToVote = (); type RuntimeEvent = RuntimeEvent; type Currency = Balances; type CurrencyBalance = ::Balance; @@ -226,9 +222,10 @@ impl pallet_staking::Config for Test { type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; type HistoryDepth = ConstU32<84>; - type OnStakerSlash = (); + type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } @@ -260,7 +257,7 @@ pub fn new_test_ext(ids: Vec) -> TestExternalities { } pub fn new_test_ext_raw_authorities(authorities: Vec) -> TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); diff --git a/frame/beefy/src/tests.rs b/frame/beefy/src/tests.rs index f9da20e90dc74..e04dc330d0c07 100644 --- a/frame/beefy/src/tests.rs +++ b/frame/beefy/src/tests.rs 
@@ -20,7 +20,7 @@ use std::vec; use codec::Encode; use sp_consensus_beefy::{ check_equivocation_proof, generate_equivocation_proof, known_payloads::MMR_ROOT_ID, - Keyring as BeefyKeyring, Payload, ValidatorSet, + Keyring as BeefyKeyring, Payload, ValidatorSet, KEY_TYPE as BEEFY_KEY_TYPE, }; use sp_runtime::DigestItem; @@ -297,8 +297,7 @@ fn report_equivocation_current_set_works() { ); // create the key ownership proof - let key_owner_proof = - Historical::prove((sp_consensus_beefy::KEY_TYPE, &equivocation_key)).unwrap(); + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); // report the equivocation and the tx should be dispatched successfully assert_ok!(Beefy::report_equivocation_unsigned( @@ -354,8 +353,7 @@ fn report_equivocation_old_set_works() { let equivocation_key = &authorities[equivocation_authority_index]; // create the key ownership proof in the "old" set - let key_owner_proof = - Historical::prove((sp_consensus_beefy::KEY_TYPE, &equivocation_key)).unwrap(); + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); start_era(2); @@ -436,8 +434,7 @@ fn report_equivocation_invalid_set_id() { let equivocation_key = &authorities[equivocation_authority_index]; let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - let key_owner_proof = - Historical::prove((sp_consensus_beefy::KEY_TYPE, &equivocation_key)).unwrap(); + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); @@ -475,8 +472,7 @@ fn report_equivocation_invalid_session() { let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); // generate a key ownership proof at current era set id - let key_owner_proof = - Historical::prove((sp_consensus_beefy::KEY_TYPE, &equivocation_key)).unwrap(); + let key_owner_proof = 
Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); start_era(2); @@ -520,7 +516,7 @@ fn report_equivocation_invalid_key_owner_proof() { // generate a key ownership proof for the authority at index 1 let invalid_key_owner_proof = - Historical::prove((sp_consensus_beefy::KEY_TYPE, &invalid_owner_key)).unwrap(); + Historical::prove((BEEFY_KEY_TYPE, &invalid_owner_key)).unwrap(); let equivocation_authority_index = 0; let equivocation_key = &authorities[equivocation_authority_index]; @@ -568,8 +564,7 @@ fn report_equivocation_invalid_equivocation_proof() { let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); // generate a key ownership proof at set id in era 1 - let key_owner_proof = - Historical::prove((sp_consensus_beefy::KEY_TYPE, &equivocation_key)).unwrap(); + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); let assert_invalid_equivocation_proof = |equivocation_proof| { assert_err!( @@ -649,8 +644,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { (block_num, payload2, set_id, &equivocation_keyring), ); - let key_owner_proof = - Historical::prove((sp_consensus_beefy::KEY_TYPE, &equivocation_key)).unwrap(); + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); let call = Call::report_equivocation_unsigned { equivocation_proof: Box::new(equivocation_proof.clone()), @@ -716,7 +710,7 @@ fn report_equivocation_has_valid_weight() { // the weight depends on the size of the validator set, // but there's a lower bound of 100 validators. assert!((1..=100) - .map(::WeightInfo::report_equivocation) + .map(|validators| ::WeightInfo::report_equivocation(validators, 1000)) .collect::>() .windows(2) .all(|w| w[0] == w[1])); @@ -724,7 +718,7 @@ fn report_equivocation_has_valid_weight() { // after 100 validators the weight should keep increasing // with every extra validator. 
assert!((100..=1000) - .map(::WeightInfo::report_equivocation) + .map(|validators| ::WeightInfo::report_equivocation(validators, 1000)) .collect::>() .windows(2) .all(|w| w[0].ref_time() < w[1].ref_time())); @@ -755,8 +749,7 @@ fn valid_equivocation_reports_dont_pay_fees() { ); // create the key ownership proof. - let key_owner_proof = - Historical::prove((sp_consensus_beefy::KEY_TYPE, &equivocation_key)).unwrap(); + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); // check the dispatch info for the call. let info = Call::::report_equivocation_unsigned { diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index bebe74b1f4b3e..414b1b2301429 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -13,29 +13,29 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } linregress = { version = "0.5.1", optional = true } log = { version = "0.4.17", default-features = false } paste = "1.0" scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", optional = true } +serde = { version = "1.0.163", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-support-procedural = { version = "4.0.0-dev", default-features = false, path = "../support/procedural" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", 
default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../../primitives/runtime-interface" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-storage = { version = "7.0.0", default-features = false, path = "../../primitives/storage" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../primitives/application-crypto" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-runtime-interface = { version = "17.0.0", default-features = false, path = "../../primitives/runtime-interface" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-storage = { version = "13.0.0", default-features = false, path = "../../primitives/storage" } static_assertions = "1.1.0" [dev-dependencies] -array-bytes = "4.1" +array-bytes = "6.1" rusty-fork = { version = "0.3.0", default-features = false } -sp-keystore = { version = "0.13.0", path = "../../primitives/keystore" } +sp-keystore = { version = "0.27.0", path = "../../primitives/keystore" } [features] default = ["std"] @@ -55,8 +55,11 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-storage/std", + "frame-support-procedural/std", + "sp-keystore/std" ] runtime-benchmarks = [ "frame-system/runtime-benchmarks", "frame-support/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" ] diff --git a/frame/benchmarking/README.md b/frame/benchmarking/README.md index 76673c5f69b33..dc6a184435df6 100644 --- a/frame/benchmarking/README.md +++ b/frame/benchmarking/README.md @@ -175,7 +175,6 @@ Then you can run a benchmark like so: 
```bash ./target/production/substrate benchmark pallet \ --chain dev \ # Configurable Chain Spec - --execution=wasm \ # Always test with Wasm --wasm-execution=compiled \ # Always used `wasm-time` --pallet pallet_balances \ # Select the pallet --extrinsic transfer \ # Select the extrinsic diff --git a/frame/benchmarking/pov/Cargo.toml b/frame/benchmarking/pov/Cargo.toml index 28358142b674a..c0ba8285519a5 100644 --- a/frame/benchmarking/pov/Cargo.toml +++ b/frame/benchmarking/pov/Cargo.toml @@ -12,14 +12,14 @@ description = "Pallet for testing FRAME PoV benchmarking" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } [features] default = ["std"] @@ -36,8 +36,11 @@ std = [ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" ] try-runtime = [ "frame-support/try-runtime", 
"frame-system/try-runtime", + "sp-runtime/try-runtime" ] diff --git a/frame/benchmarking/pov/src/benchmarking.rs b/frame/benchmarking/pov/src/benchmarking.rs index 27191e37219fd..473947b171ac5 100644 --- a/frame/benchmarking/pov/src/benchmarking.rs +++ b/frame/benchmarking/pov/src/benchmarking.rs @@ -339,22 +339,17 @@ frame_benchmarking::benchmarks! { #[cfg(test)] mod mock { - use sp_runtime::testing::H256; + use sp_runtime::{testing::H256, BuildStorage}; type AccountId = u64; - type AccountIndex = u32; - type BlockNumber = u64; + type Nonce = u32; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Baseline: crate::{Pallet, Call, Storage, Event}, } ); @@ -365,14 +360,13 @@ mod mock { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = AccountIndex; - type BlockNumber = BlockNumber; + type Nonce = Nonce; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = sp_runtime::testing::Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); @@ -391,6 +385,6 @@ mod mock { } pub fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default().build_storage::().unwrap().into() + frame_system::GenesisConfig::::default().build_storage().unwrap().into() } } diff --git a/frame/benchmarking/pov/src/lib.rs b/frame/benchmarking/pov/src/lib.rs index 66000e54151b0..eb02ccc983c09 100644 --- a/frame/benchmarking/pov/src/lib.rs +++ b/frame/benchmarking/pov/src/lib.rs @@ -32,7 +32,7 @@ 
pub mod pallet { use sp_std::prelude::*; #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::config] pub trait Config: frame_system::Config { diff --git a/frame/benchmarking/pov/src/tests.rs b/frame/benchmarking/pov/src/tests.rs index b908925cccd8e..f09e37a5288a9 100644 --- a/frame/benchmarking/pov/src/tests.rs +++ b/frame/benchmarking/pov/src/tests.rs @@ -164,16 +164,12 @@ fn noop_is_free() { mod mock { use sp_runtime::testing::H256; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Baseline: crate::{Pallet, Call, Storage, Event}, } ); @@ -184,14 +180,13 @@ mod mock { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u32; - type BlockNumber = u64; + type Nonce = u32; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u32; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = sp_runtime::testing::Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); diff --git a/frame/benchmarking/src/baseline.rs b/frame/benchmarking/src/baseline.rs index 25336b6974d9f..94c066269d91b 100644 --- a/frame/benchmarking/src/baseline.rs +++ b/frame/benchmarking/src/baseline.rs @@ -111,22 +111,17 @@ benchmarks! 
{ #[cfg(test)] pub mod mock { use super::*; - use sp_runtime::testing::H256; + use sp_runtime::{testing::H256, BuildStorage}; type AccountId = u64; - type AccountIndex = u32; - type BlockNumber = u64; + type Nonce = u32; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, } ); @@ -136,14 +131,13 @@ pub mod mock { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = AccountIndex; - type BlockNumber = BlockNumber; + type Nonce = Nonce; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = sp_runtime::testing::Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); @@ -162,7 +156,7 @@ pub mod mock { pub fn new_test_ext() -> sp_io::TestExternalities { use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.register_extension(KeystoreExt::new(MemoryKeystore::new())); diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index b11e164fe8e44..8864cd1d88ddf 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -128,7 +128,7 @@ pub use v1::*; /// as `I` in the case of an `#[instance_benchmarks]` module. 
You should not add these to the /// function signature as this will be handled automatically for you based on whether this is a /// `#[benchmarks]` or `#[instance_benchmarks]` module and whatever [where clause](#where-clause) -/// you have defined for the the module. You should not manually add any generics to the +/// you have defined for the module. You should not manually add any generics to the /// signature of your benchmark function. /// /// Also note that the `// setup code` and `// verification code` comments shown above are not @@ -246,22 +246,21 @@ pub use v1::*; /// of impls and structs required by the benchmarking engine. Additionally, a benchmark /// function is also generated that resembles the function definition you provide, with a few /// modifications: -/// 1. The function name is transformed from i.e. `original_name` to `_original_name` so as not -/// to collide with the struct `original_name` that is created for some of the benchmarking -/// engine impls. -/// 2. Appropriate `T: Config` and `I` (if this is an instance benchmark) generics are added to -/// the function automatically during expansion, so you should not add these manually on -/// your function definition (but you may make use of `T` and `I` anywhere within your -/// benchmark function, in any of the three sections (setup, call, verification). +/// 1. The function name is transformed from i.e. `original_name` to `_original_name` so as not to +/// collide with the struct `original_name` that is created for some of the benchmarking engine +/// impls. +/// 2. Appropriate `T: Config` and `I` (if this is an instance benchmark) generics are added to the +/// function automatically during expansion, so you should not add these manually on your +/// function definition (but you may make use of `T` and `I` anywhere within your benchmark +/// function, in any of the three sections (setup, call, verification). /// 3. 
Arguments such as `u: Linear<10, 100>` are converted to `u: u32` to make the function /// directly callable. -/// 4. A `verify: bool` param is added as the last argument. Specifying `true` will result in -/// the verification section of your function executing, while a value of `false` will skip +/// 4. A `verify: bool` param is added as the last argument. Specifying `true` will result in the +/// verification section of your function executing, while a value of `false` will skip /// verification. /// 5. If you specify a return type on the function definition, it must conform to the [rules -/// below](#support-for-result-benchmarkerror-and-the--operator), and the last statement of -/// the function definition must resolve to something compatible with `Result<(), -/// BenchmarkError>`. +/// below](#support-for-result-benchmarkerror-and-the--operator), and the last statement of the +/// function definition must resolve to something compatible with `Result<(), BenchmarkError>`. /// /// The reason we generate an actual function as part of the expansion is to allow the compiler /// to enforce several constraints that would otherwise be difficult to enforce and to reduce diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 7e240ee04903a..4b8339f65913d 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -22,7 +22,7 @@ use super::*; use frame_support::{parameter_types, traits::ConstU32}; use sp_runtime::{ - testing::{Header, H256}, + testing::H256, traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; @@ -35,7 +35,7 @@ mod pallet_test { use frame_system::pallet_prelude::*; #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::config] pub trait Config: frame_system::Config { @@ -66,16 +66,12 @@ mod pallet_test { } } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub 
enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, TestPallet: pallet_test::{Pallet, Call, Storage}, } ); @@ -86,14 +82,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); @@ -118,7 +113,7 @@ impl pallet_test::Config for Test { } fn new_test_ext() -> sp_io::TestExternalities { - GenesisConfig::default().build_storage().unwrap().into() + RuntimeGenesisConfig::default().build_storage().unwrap().into() } thread_local! 
{ diff --git a/frame/benchmarking/src/tests_instance.rs b/frame/benchmarking/src/tests_instance.rs index d017fc679875e..822efa70a2597 100644 --- a/frame/benchmarking/src/tests_instance.rs +++ b/frame/benchmarking/src/tests_instance.rs @@ -22,7 +22,7 @@ use super::*; use frame_support::traits::ConstU32; use sp_runtime::{ - testing::{Header, H256}, + testing::H256, traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; @@ -76,16 +76,12 @@ mod pallet_test { } } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, TestPallet: pallet_test::{Pallet, Call, Storage, Event}, } ); @@ -96,13 +92,12 @@ impl frame_system::Config for Test { type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type DbWeight = (); @@ -128,7 +123,7 @@ impl pallet_test::OtherConfig for Test { } fn new_test_ext() -> sp_io::TestExternalities { - GenesisConfig::default().build_storage().unwrap().into() + RuntimeGenesisConfig::default().build_storage().unwrap().into() } mod benchmarks { diff --git a/frame/benchmarking/src/v1.rs b/frame/benchmarking/src/v1.rs index be6dc393cbce3..d9f1c7dde0059 100644 --- a/frame/benchmarking/src/v1.rs +++ b/frame/benchmarking/src/v1.rs @@ -661,7 +661,7 @@ macro_rules! 
benchmark_backend { { $( PRE { $( $pre_parsed:tt )* } )* } { $eval:block } { - let $pre_id:tt : $pre_ty:ty = $pre_ex:expr; + let $pre_id:tt $( : $pre_ty:ty )? = $pre_ex:expr; $( $rest:tt )* } $postcode:block @@ -672,7 +672,7 @@ macro_rules! benchmark_backend { { $( $where_clause )* } { $( PRE { $( $pre_parsed )* } )* - PRE { $pre_id , $pre_ty , $pre_ex } + PRE { $pre_id , $( $pre_ty , )? $pre_ex } } { $eval } { $( $rest )* } @@ -756,39 +756,13 @@ macro_rules! benchmark_backend { $postcode } }; - // mutation arm to look after `let _ =` - ( - { $( $instance:ident: $instance_bound:tt )? } - $name:ident - { $( $where_clause:tt )* } - { $( $parsed:tt )* } - { $eval:block } - { - let $pre_id:tt = $pre_ex:expr; - $( $rest:tt )* - } - $postcode:block - ) => { - $crate::benchmark_backend! { - { $( $instance: $instance_bound )? } - $name - { $( $where_clause )* } - { $( $parsed )* } - { $eval } - { - let $pre_id : _ = $pre_ex; - $( $rest )* - } - $postcode - } - }; // actioning arm ( { $( $instance:ident: $instance_bound:tt )? } $name:ident { $( $where_clause:tt )* } { - $( PRE { $pre_id:tt , $pre_ty:ty , $pre_ex:expr } )* + $( PRE { $pre_id:tt , $( $pre_ty:ty , )? $pre_ex:expr } )* $( PARAM { $param:ident , $param_from:expr , $param_to:expr , $param_instancer:expr } )* } { $eval:block } @@ -823,7 +797,7 @@ macro_rules! benchmark_backend { .1; )* $( - let $pre_id : $pre_ty = $pre_ex; + let $pre_id $( : $pre_ty )? = $pre_ex; )* $( $param_instancer ; )* $( $post )* @@ -1086,6 +1060,11 @@ macro_rules! impl_benchmark { $crate::well_known_keys::EXTRINSIC_INDEX.into() ); whitelist.push(extrinsic_index); + // Whitelist the `:intrablock_entropy`. + let intrablock_entropy = $crate::TrackedStorageKey::new( + $crate::well_known_keys::INTRABLOCK_ENTROPY.into() + ); + whitelist.push(intrablock_entropy); $crate::benchmarking::set_whitelist(whitelist.clone()); @@ -1847,9 +1826,9 @@ pub fn show_benchmark_debug_info( /// ``` #[macro_export] macro_rules! 
add_benchmark { - ( $params:ident, $batches:ident, $name:path, $( $location:tt )* ) => ( + ( $params:ident, $batches:ident, $name:path, $location:ty ) => { let name_string = stringify!($name).as_bytes(); - let instance_string = stringify!( $( $location )* ).as_bytes(); + let instance_string = stringify!($location).as_bytes(); let (config, whitelist) = $params; let $crate::BenchmarkConfig { pallet, @@ -1859,7 +1838,7 @@ macro_rules! add_benchmark { internal_repeats, } = config; if &pallet[..] == &name_string[..] { - let benchmark_result = $( $location )*::run_benchmark( + let benchmark_result = <$location>::run_benchmark( &benchmark[..], &selected_components[..], whitelist, @@ -1876,9 +1855,7 @@ macro_rules! add_benchmark { $crate::str::from_utf8(benchmark) .expect("benchmark name is always a valid string!") ); - result.keys.insert(0, - (b"Benchmark Override".to_vec(), 0, 0, false) - ); + result.keys.insert(0, (b"Benchmark Override".to_vec(), 0, 0, false)); Some($crate::vec![result]) }, Err($crate::BenchmarkError::Stop(e)) => { @@ -1889,7 +1866,7 @@ macro_rules! add_benchmark { verify, e, ); - return Err(e.into()); + return Err(e.into()) }, Err($crate::BenchmarkError::Skip) => { $crate::log::error!( @@ -1907,9 +1884,9 @@ macro_rules! add_benchmark { ); Some(vec![$crate::BenchmarkResult { components: selected_components.clone(), - .. Default::default() + ..Default::default() }]) - } + }, }; if let Some(final_results) = final_results { @@ -1921,21 +1898,7 @@ macro_rules! add_benchmark { }); } } - ) -} - -/// Callback for `define_benchmarks` to call `add_benchmark`. -#[macro_export] -macro_rules! 
cb_add_benchmarks { - // anchor - ( $params:ident, $batches:ident, [ $name:path, $( $location:tt )* ] ) => { - $crate::add_benchmark!( $params, $batches, $name, $( $location )* ); }; - // recursion tail - ( $params:ident, $batches:ident, [ $name:path, $( $location:tt )* ] $([ $names:path, $( $locations:tt )* ])+ ) => { - $crate::cb_add_benchmarks!( $params, $batches, [ $name, $( $location )* ] ); - $crate::cb_add_benchmarks!( $params, $batches, $([ $names, $( $locations )* ])+ ); - } } /// This macro allows users to easily generate a list of benchmarks for the pallets configured @@ -1959,31 +1922,17 @@ macro_rules! cb_add_benchmarks { /// This should match what exists with the `add_benchmark!` macro. #[macro_export] macro_rules! list_benchmark { - ( $list:ident, $extra:ident, $name:path, $( $location:tt )* ) => ( + ( $list:ident, $extra:ident, $name:path, $location:ty ) => { let pallet_string = stringify!($name).as_bytes(); - let instance_string = stringify!( $( $location )* ).as_bytes(); - let benchmarks = $( $location )*::benchmarks($extra); + let instance_string = stringify!($location).as_bytes(); + let benchmarks = <$location>::benchmarks($extra); let pallet_benchmarks = BenchmarkList { pallet: pallet_string.to_vec(), instance: instance_string.to_vec(), benchmarks: benchmarks.to_vec(), }; $list.push(pallet_benchmarks) - ) -} - -/// Callback for `define_benchmarks` to call `list_benchmark`. -#[macro_export] -macro_rules! cb_list_benchmarks { - // anchor - ( $list:ident, $extra:ident, [ $name:path, $( $location:tt )* ] ) => { - $crate::list_benchmark!( $list, $extra, $name, $( $location )* ); }; - // recursion tail - ( $list:ident, $extra:ident, [ $name:path, $( $location:tt )* ] $([ $names:path, $( $locations:tt )* ])+ ) => { - $crate::cb_list_benchmarks!( $list, $extra, [ $name, $( $location )* ] ); - $crate::cb_list_benchmarks!( $list, $extra, $([ $names, $( $locations )* ])+ ); - } } /// Defines pallet configs that `add_benchmarks` and `list_benchmarks` use. 
@@ -1991,7 +1940,7 @@ macro_rules! cb_list_benchmarks { /// in `add_benchmark` and `list_benchmark`. #[macro_export] macro_rules! define_benchmarks { - ( $([ $names:path, $( $locations:tt )* ])* ) => { + ( $([ $names:path, $locations:ty ])* ) => { /// Calls `list_benchmark` with all configs from `define_benchmarks` /// and passes the first two parameters on. /// @@ -2002,7 +1951,7 @@ macro_rules! define_benchmarks { #[macro_export] macro_rules! list_benchmarks { ( $list:ident, $extra:ident ) => { - $crate::cb_list_benchmarks!( $list, $extra, $([ $names, $( $locations )* ])+ ); + $( $crate::list_benchmark!( $list, $extra, $names, $locations); )* } } @@ -2016,7 +1965,7 @@ macro_rules! define_benchmarks { #[macro_export] macro_rules! add_benchmarks { ( $params:ident, $batches:ident ) => { - $crate::cb_add_benchmarks!( $params, $batches, $([ $names, $( $locations )* ])+ ); + $( $crate::add_benchmark!( $params, $batches, $names, $locations ); )* } } } @@ -2028,8 +1977,6 @@ pub use benchmarks; pub use benchmarks_instance; pub use benchmarks_instance_pallet; pub use benchmarks_iter; -pub use cb_add_benchmarks; -pub use cb_list_benchmarks; pub use define_benchmarks; pub use impl_bench_case_tests; pub use impl_bench_name_tests; diff --git a/frame/benchmarking/src/weights.rs b/frame/benchmarking/src/weights.rs index 25e2702f702d1..13d73e420cce2 100644 --- a/frame/benchmarking/src/weights.rs +++ b/frame/benchmarking/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for frame_benchmarking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=frame_benchmarking +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for frame_benchmarking. pub trait WeightInfo { @@ -64,49 +68,49 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 173_000 picoseconds. - Weight::from_parts(205_895, 0) + // Minimum execution time: 147_000 picoseconds. + Weight::from_parts(185_656, 0) } /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 180_000 picoseconds. - Weight::from_parts(206_967, 0) + // Minimum execution time: 146_000 picoseconds. + Weight::from_parts(189_816, 0) } /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 174_000 picoseconds. - Weight::from_parts(214_304, 0) + // Minimum execution time: 148_000 picoseconds. + Weight::from_parts(202_367, 0) } /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 173_000 picoseconds. - Weight::from_parts(207_804, 0) + // Minimum execution time: 143_000 picoseconds. 
+ Weight::from_parts(189_693, 0) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_173_551_000 picoseconds. - Weight::from_parts(21_256_886_000, 0) + // Minimum execution time: 24_167_071_000 picoseconds. + Weight::from_parts(24_391_749_000, 0) } /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 208_000 picoseconds. - Weight::from_parts(1_227_077, 0) - // Standard Error: 9_390 - .saturating_add(Weight::from_parts(47_152_841, 0).saturating_mul(i.into())) + // Minimum execution time: 231_000 picoseconds. + Weight::from_parts(2_998_013, 0) + // Standard Error: 6_256 + .saturating_add(Weight::from_parts(55_456_705, 0).saturating_mul(i.into())) } } @@ -117,48 +121,48 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 173_000 picoseconds. - Weight::from_parts(205_895, 0) + // Minimum execution time: 147_000 picoseconds. + Weight::from_parts(185_656, 0) } /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 180_000 picoseconds. - Weight::from_parts(206_967, 0) + // Minimum execution time: 146_000 picoseconds. + Weight::from_parts(189_816, 0) } /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 174_000 picoseconds. - Weight::from_parts(214_304, 0) + // Minimum execution time: 148_000 picoseconds. + Weight::from_parts(202_367, 0) } /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 173_000 picoseconds. 
- Weight::from_parts(207_804, 0) + // Minimum execution time: 143_000 picoseconds. + Weight::from_parts(189_693, 0) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_173_551_000 picoseconds. - Weight::from_parts(21_256_886_000, 0) + // Minimum execution time: 24_167_071_000 picoseconds. + Weight::from_parts(24_391_749_000, 0) } /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 208_000 picoseconds. - Weight::from_parts(1_227_077, 0) - // Standard Error: 9_390 - .saturating_add(Weight::from_parts(47_152_841, 0).saturating_mul(i.into())) + // Minimum execution time: 231_000 picoseconds. + Weight::from_parts(2_998_013, 0) + // Standard Error: 6_256 + .saturating_add(Weight::from_parts(55_456_705, 0).saturating_mul(i.into())) } } diff --git a/frame/bounties/Cargo.toml b/frame/bounties/Cargo.toml index a2f39ccd7ec0b..950284e26824e 100644 --- a/frame/bounties/Cargo.toml +++ b/frame/bounties/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } log = { version = "0.4.17", default-features = false } @@ -22,10 +22,10 @@ frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-treasury = { version = "4.0.0-dev", default-features = false, path = "../treasury" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = 
false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } @@ -44,10 +44,20 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-treasury/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-treasury/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 0675328c3d3c2..6fff337cba450 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -24,7 +24,7 @@ use super::*; use frame_benchmarking::v1::{ account, benchmarks_instance_pallet, whitelisted_caller, BenchmarkError, }; -use frame_system::RawOrigin; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use sp_runtime::traits::Bounded; use crate::Pallet as Bounties; @@ -77,7 +77,7 @@ fn create_bounty, I: 'static>( let approve_origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; Bounties::::approve_bounty(approve_origin.clone(), bounty_id)?; - 
Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(BlockNumberFor::::zero()); Bounties::::propose_curator(approve_origin, bounty_id, curator_lookup.clone(), fee)?; Bounties::::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?; Ok((curator_lookup, bounty_id)) @@ -115,14 +115,14 @@ benchmarks_instance_pallet! { let bounty_id = BountyCount::::get() - 1; let approve_origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; Bounties::::approve_bounty(approve_origin.clone(), bounty_id)?; - Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(BlockNumberFor::::zero()); }: _(approve_origin, bounty_id, curator_lookup, fee) // Worst case when curator is inactive and any sender unassigns the curator. unassign_curator { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(BlockNumberFor::::zero()); let bounty_id = BountyCount::::get() - 1; frame_system::Pallet::::set_block_number(T::BountyUpdatePeriod::get() + 2u32.into()); let caller = whitelisted_caller(); @@ -136,14 +136,14 @@ benchmarks_instance_pallet! 
{ let bounty_id = BountyCount::::get() - 1; let approve_origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; Bounties::::approve_bounty(approve_origin.clone(), bounty_id)?; - Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(BlockNumberFor::::zero()); Bounties::::propose_curator(approve_origin, bounty_id, curator_lookup, fee)?; }: _(RawOrigin::Signed(curator), bounty_id) award_bounty { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(BlockNumberFor::::zero()); let bounty_id = BountyCount::::get() - 1; let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; @@ -154,7 +154,7 @@ benchmarks_instance_pallet! { claim_bounty { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(BlockNumberFor::::zero()); let bounty_id = BountyCount::::get() - 1; let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; @@ -183,7 +183,7 @@ benchmarks_instance_pallet! { close_bounty_active { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(BlockNumberFor::::zero()); let bounty_id = BountyCount::::get() - 1; let approve_origin = T::ApproveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -195,7 +195,7 @@ benchmarks_instance_pallet! 
{ extend_bounty_expiry { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(BlockNumberFor::::zero()); let bounty_id = BountyCount::::get() - 1; let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index 14f7b45cb9aa1..c64a35672c7f7 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -201,11 +201,11 @@ pub mod pallet { /// The delay period for which a bounty beneficiary need to wait before claim the payout. #[pallet::constant] - type BountyDepositPayoutDelay: Get; + type BountyDepositPayoutDelay: Get>; /// Bounty duration in blocks. #[pallet::constant] - type BountyUpdatePeriod: Get; + type BountyUpdatePeriod: Get>; /// The curator deposit is calculated as a percentage of the curator fee. /// @@ -305,7 +305,7 @@ pub mod pallet { _, Twox64Concat, BountyIndex, - Bounty, T::BlockNumber>, + Bounty, BlockNumberFor>, >; /// The description of each bounty. 
@@ -361,7 +361,7 @@ pub mod pallet { ) -> DispatchResult { let max_amount = T::SpendOrigin::ensure_origin(origin)?; Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; ensure!( bounty.value <= max_amount, pallet_treasury::Error::::InsufficientPermission @@ -396,7 +396,7 @@ pub mod pallet { let curator = T::Lookup::lookup(curator)?; Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; ensure!( bounty.value <= max_amount, pallet_treasury::Error::::InsufficientPermission @@ -444,7 +444,7 @@ pub mod pallet { .or_else(|_| T::RejectOrigin::ensure_origin(origin).map(|_| None))?; Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; let slash_curator = |curator: &T::AccountId, curator_deposit: &mut BalanceOf| { @@ -527,7 +527,7 @@ pub mod pallet { let signer = ensure_signed(origin)?; Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; match bounty.status { BountyStatus::CuratorProposed { ref curator } => { @@ -571,7 +571,7 @@ pub mod pallet { let beneficiary = T::Lookup::lookup(beneficiary)?; Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; // Ensure no active child bounties before processing the call. 
ensure!( diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index ef3da7564874e..a6fb89bb86012 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -23,37 +23,30 @@ use super::*; use crate as pallet_bounties; use frame_support::{ - assert_noop, assert_ok, - pallet_prelude::GenesisBuild, - parameter_types, + assert_noop, assert_ok, parameter_types, traits::{ConstU32, ConstU64, OnInitialize}, PalletId, }; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BadOrigin, BlakeTwo256, IdentityLookup}, BuildStorage, Perbill, Storage, }; use super::Event as BountiesEvent; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Bounties: pallet_bounties::{Pallet, Call, Storage, Event}, Bounties1: pallet_bounties::::{Pallet, Call, Storage, Event}, - Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event}, - Treasury1: pallet_treasury::::{Pallet, Call, Storage, Config, Event}, + Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event}, + Treasury1: pallet_treasury::::{Pallet, Call, Storage, Config, Event}, } ); @@ -69,14 +62,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type 
BlockHashCount = ConstU64<250>; type Version = (); @@ -102,7 +94,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } parameter_types! { @@ -194,7 +186,7 @@ type TreasuryError = pallet_treasury::Error; type TreasuryError1 = pallet_treasury::Error; pub fn new_test_ext() -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = GenesisConfig { + let mut ext: sp_io::TestExternalities = RuntimeGenesisConfig { system: frame_system::GenesisConfig::default(), balances: pallet_balances::GenesisConfig { balances: vec![(0, 100), (1, 98), (2, 1)] }, treasury: Default::default(), @@ -236,7 +228,10 @@ fn minting_works() { #[test] fn spend_proposal_takes_min_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) + }); assert_eq!(Balances::free_balance(0), 99); assert_eq!(Balances::reserved_balance(0), 1); }); @@ -245,7 +240,10 @@ fn spend_proposal_takes_min_deposit() { #[test] fn spend_proposal_takes_proportional_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); assert_eq!(Balances::free_balance(0), 95); assert_eq!(Balances::reserved_balance(0), 5); }); @@ -255,7 +253,10 @@ fn spend_proposal_takes_proportional_deposit() { fn spend_proposal_fails_when_proposer_poor() { new_test_ext().execute_with(|| { assert_noop!( - Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3), + { + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3) + }, TreasuryError::InsufficientProposersBalance, ); }); @@ -266,8 +267,14 @@ fn accepted_spend_proposal_ignored_outside_spend_period() { 
new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }); >::on_initialize(1); assert_eq!(Balances::free_balance(3), 0); @@ -293,8 +300,14 @@ fn rejected_spend_proposal_ignored_on_spend_period() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::reject_proposal(RuntimeOrigin::root(), 0) + }); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 0); @@ -307,10 +320,19 @@ fn reject_already_rejected_spend_proposal_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::reject_proposal(RuntimeOrigin::root(), 0) + }); assert_noop!( - Treasury::reject_proposal(RuntimeOrigin::root(), 0), + { + #[allow(deprecated)] + Treasury::reject_proposal(RuntimeOrigin::root(), 0) + }, TreasuryError::InvalidIndex ); }); @@ -320,7 +342,10 @@ fn reject_already_rejected_spend_proposal_fails() { fn reject_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { assert_noop!( - Treasury::reject_proposal(RuntimeOrigin::root(), 
0), + { + #[allow(deprecated)] + Treasury::reject_proposal(RuntimeOrigin::root(), 0) + }, pallet_treasury::Error::::InvalidIndex ); }); @@ -330,7 +355,10 @@ fn reject_non_existent_spend_proposal_fails() { fn accept_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { assert_noop!( - Treasury::approve_proposal(RuntimeOrigin::root(), 0), + { + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }, TreasuryError::InvalidIndex ); }); @@ -341,10 +369,19 @@ fn accept_already_rejected_spend_proposal_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::reject_proposal(RuntimeOrigin::root(), 0) + }); assert_noop!( - Treasury::approve_proposal(RuntimeOrigin::root(), 0), + { + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }, TreasuryError::InvalidIndex ); }); @@ -356,8 +393,14 @@ fn accepted_spend_proposal_enacted_on_spend_period() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 100); @@ -371,8 +414,14 @@ fn pot_underflow_should_not_diminish() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3)); - 
assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed @@ -393,14 +442,26 @@ fn treasury_account_doesnt_get_deleted() { assert_eq!(Treasury::pot(), 100); let treasury_balance = Balances::free_balance(&Treasury::account_id()); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 1)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 1) + }); >::on_initialize(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied @@ -412,7 +473,7 @@ fn treasury_account_doesnt_get_deleted() { // This is useful for chain that will just update runtime. 
#[test] fn inexistent_account_works() { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(0, 100), (1, 99), (2, 1)] } .assimilate_storage(&mut t) .unwrap(); @@ -423,10 +484,22 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist assert_eq!(Treasury::pot(), 0); // Pot is empty - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 1)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 1) + }); >::on_initialize(2); assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed @@ -1073,7 +1146,7 @@ fn test_migration_v4() { #[test] fn genesis_funding_works() { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let initial_funding = 100; pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized with 100. 
@@ -1081,7 +1154,9 @@ fn genesis_funding_works() { } .assimilate_storage(&mut t) .unwrap(); - GenesisBuild::::assimilate_storage(&pallet_treasury::GenesisConfig, &mut t).unwrap(); + pallet_treasury::GenesisConfig::::default() + .assimilate_storage(&mut t) + .unwrap(); let mut t: sp_io::TestExternalities = t.into(); t.execute_with(|| { diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs index 5a84adf08210c..a172d15b56cc9 100644 --- a/frame/bounties/src/weights.rs +++ b/frame/bounties/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_bounties //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_bounties +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_bounties. pub trait WeightInfo { @@ -77,10 +81,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `276` // Estimated: `3593` - // Minimum execution time: 30_793_000 picoseconds. 
- Weight::from_parts(31_509_544, 3593) - // Standard Error: 168 - .saturating_add(Weight::from_parts(2_219, 0).saturating_mul(d.into())) + // Minimum execution time: 29_384_000 picoseconds. + Weight::from_parts(30_820_018, 3593) + // Standard Error: 298 + .saturating_add(Weight::from_parts(2_920, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -92,8 +96,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `368` // Estimated: `3642` - // Minimum execution time: 12_471_000 picoseconds. - Weight::from_parts(12_677_000, 3642) + // Minimum execution time: 10_873_000 picoseconds. + Weight::from_parts(11_421_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -103,8 +107,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3642` - // Minimum execution time: 10_560_000 picoseconds. - Weight::from_parts(10_744_000, 3642) + // Minimum execution time: 9_181_000 picoseconds. + Weight::from_parts(9_726_000, 3642) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -116,8 +120,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `564` // Estimated: `3642` - // Minimum execution time: 30_980_000 picoseconds. - Weight::from_parts(31_354_000, 3642) + // Minimum execution time: 30_257_000 picoseconds. + Weight::from_parts(30_751_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -129,8 +133,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `560` // Estimated: `3642` - // Minimum execution time: 29_257_000 picoseconds. - Weight::from_parts(29_656_000, 3642) + // Minimum execution time: 27_850_000 picoseconds. 
+ Weight::from_parts(28_821_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -140,10 +144,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `572` // Estimated: `3642` - // Minimum execution time: 20_662_000 picoseconds. - Weight::from_parts(20_956_000, 3642) + // Minimum execution time: 19_164_000 picoseconds. + Weight::from_parts(20_136_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -157,10 +161,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `902` + // Measured: `936` // Estimated: `8799` - // Minimum execution time: 119_287_000 picoseconds. - Weight::from_parts(121_468_000, 8799) + // Minimum execution time: 120_235_000 picoseconds. + Weight::from_parts(121_673_000, 8799) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -174,10 +178,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `582` + // Measured: `616` // Estimated: `3642` - // Minimum execution time: 37_759_000 picoseconds. - Weight::from_parts(38_185_000, 3642) + // Minimum execution time: 35_713_000 picoseconds. 
+ Weight::from_parts(37_174_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -191,10 +195,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `818` + // Measured: `852` // Estimated: `6196` - // Minimum execution time: 80_332_000 picoseconds. - Weight::from_parts(81_328_000, 6196) + // Minimum execution time: 81_037_000 picoseconds. + Weight::from_parts(83_294_000, 6196) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -204,8 +208,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `424` // Estimated: `3642` - // Minimum execution time: 16_301_000 picoseconds. - Weight::from_parts(16_611_000, 3642) + // Minimum execution time: 15_348_000 picoseconds. + Weight::from_parts(15_776_000, 3642) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -220,10 +224,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4 + b * (297 ±0)` // Estimated: `1887 + b * (5206 ±0)` - // Minimum execution time: 5_430_000 picoseconds. - Weight::from_parts(4_463_976, 1887) - // Standard Error: 43_695 - .saturating_add(Weight::from_parts(39_370_113, 0).saturating_mul(b.into())) + // Minimum execution time: 5_082_000 picoseconds. 
+ Weight::from_parts(5_126_000, 1887) + // Standard Error: 21_949 + .saturating_add(Weight::from_parts(42_635_308, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -247,10 +251,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `276` // Estimated: `3593` - // Minimum execution time: 30_793_000 picoseconds. - Weight::from_parts(31_509_544, 3593) - // Standard Error: 168 - .saturating_add(Weight::from_parts(2_219, 0).saturating_mul(d.into())) + // Minimum execution time: 29_384_000 picoseconds. + Weight::from_parts(30_820_018, 3593) + // Standard Error: 298 + .saturating_add(Weight::from_parts(2_920, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -262,8 +266,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `368` // Estimated: `3642` - // Minimum execution time: 12_471_000 picoseconds. - Weight::from_parts(12_677_000, 3642) + // Minimum execution time: 10_873_000 picoseconds. + Weight::from_parts(11_421_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -273,8 +277,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3642` - // Minimum execution time: 10_560_000 picoseconds. - Weight::from_parts(10_744_000, 3642) + // Minimum execution time: 9_181_000 picoseconds. + Weight::from_parts(9_726_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -286,8 +290,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `564` // Estimated: `3642` - // Minimum execution time: 30_980_000 picoseconds. - Weight::from_parts(31_354_000, 3642) + // Minimum execution time: 30_257_000 picoseconds. 
+ Weight::from_parts(30_751_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -299,8 +303,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `560` // Estimated: `3642` - // Minimum execution time: 29_257_000 picoseconds. - Weight::from_parts(29_656_000, 3642) + // Minimum execution time: 27_850_000 picoseconds. + Weight::from_parts(28_821_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -310,10 +314,10 @@ impl WeightInfo for () { /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `572` // Estimated: `3642` - // Minimum execution time: 20_662_000 picoseconds. - Weight::from_parts(20_956_000, 3642) + // Minimum execution time: 19_164_000 picoseconds. + Weight::from_parts(20_136_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -327,10 +331,10 @@ impl WeightInfo for () { /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `902` + // Measured: `936` // Estimated: `8799` - // Minimum execution time: 119_287_000 picoseconds. - Weight::from_parts(121_468_000, 8799) + // Minimum execution time: 120_235_000 picoseconds. 
+ Weight::from_parts(121_673_000, 8799) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -344,10 +348,10 @@ impl WeightInfo for () { /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `582` + // Measured: `616` // Estimated: `3642` - // Minimum execution time: 37_759_000 picoseconds. - Weight::from_parts(38_185_000, 3642) + // Minimum execution time: 35_713_000 picoseconds. + Weight::from_parts(37_174_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -361,10 +365,10 @@ impl WeightInfo for () { /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `818` + // Measured: `852` // Estimated: `6196` - // Minimum execution time: 80_332_000 picoseconds. - Weight::from_parts(81_328_000, 6196) + // Minimum execution time: 81_037_000 picoseconds. + Weight::from_parts(83_294_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -374,8 +378,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `424` // Estimated: `3642` - // Minimum execution time: 16_301_000 picoseconds. - Weight::from_parts(16_611_000, 3642) + // Minimum execution time: 15_348_000 picoseconds. + Weight::from_parts(15_776_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -390,10 +394,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4 + b * (297 ±0)` // Estimated: `1887 + b * (5206 ±0)` - // Minimum execution time: 5_430_000 picoseconds. 
- Weight::from_parts(4_463_976, 1887) - // Standard Error: 43_695 - .saturating_add(Weight::from_parts(39_370_113, 0).saturating_mul(b.into())) + // Minimum execution time: 5_082_000 picoseconds. + Weight::from_parts(5_126_000, 1887) + // Standard Error: 21_949 + .saturating_add(Weight::from_parts(42_635_308, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) diff --git a/frame/child-bounties/Cargo.toml b/frame/child-bounties/Cargo.toml index 48fa222616152..7613c462eb0c2 100644 --- a/frame/child-bounties/Cargo.toml +++ b/frame/child-bounties/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } log = { version = "0.4.17", default-features = false } @@ -23,10 +23,10 @@ frame-support = { version = "4.0.0-dev", default-features = false, path = "../su frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-bounties = { version = "4.0.0-dev", default-features = false, path = "../bounties" } pallet-treasury = { version = "4.0.0-dev", default-features = false, path = "../treasury" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } 
+sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } @@ -46,10 +46,23 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std" ] runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-bounties/runtime-benchmarks", + "pallet-treasury/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-bounties/try-runtime", + "pallet-treasury/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/child-bounties/src/benchmarking.rs b/frame/child-bounties/src/benchmarking.rs index e49d9c836125d..1973564d0dc1d 100644 --- a/frame/child-bounties/src/benchmarking.rs +++ b/frame/child-bounties/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; -use frame_system::RawOrigin; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use crate::Pallet as ChildBounties; use pallet_bounties::Pallet as Bounties; @@ -114,7 +114,7 @@ fn activate_bounty( let approve_origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; Bounties::::approve_bounty(approve_origin, child_bounty_setup.bounty_id)?; - Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(BlockNumberFor::::zero()); Bounties::::propose_curator( RawOrigin::Root.into(), child_bounty_setup.bounty_id, @@ -229,7 +229,7 @@ benchmarks! 
{ unassign_curator { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; - Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(BlockNumberFor::::zero()); frame_system::Pallet::::set_block_number(T::BountyUpdatePeriod::get() + 1u32.into()); let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), bounty_setup.bounty_id, @@ -303,7 +303,7 @@ benchmarks! { close_child_bounty_active { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; - Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(BlockNumberFor::::zero()); }: close_child_bounty(RawOrigin::Root, bounty_setup.bounty_id, bounty_setup.child_bounty_id) verify { assert_last_event::(Event::Canceled { diff --git a/frame/child-bounties/src/lib.rs b/frame/child-bounties/src/lib.rs index 094b41822c433..1eedeaa5a1ae3 100644 --- a/frame/child-bounties/src/lib.rs +++ b/frame/child-bounties/src/lib.rs @@ -200,7 +200,7 @@ pub mod pallet { BountyIndex, Twox64Concat, BountyIndex, - ChildBounty, T::BlockNumber>, + ChildBounty, BlockNumberFor>, >; /// The description of each child-bounty. @@ -331,7 +331,7 @@ pub mod pallet { parent_bounty_id, child_bounty_id, |maybe_child_bounty| -> DispatchResult { - let mut child_bounty = + let child_bounty = maybe_child_bounty.as_mut().ok_or(BountiesError::::InvalidIndex)?; // Ensure child-bounty is in expected state. @@ -396,7 +396,7 @@ pub mod pallet { parent_bounty_id, child_bounty_id, |maybe_child_bounty| -> DispatchResult { - let mut child_bounty = + let child_bounty = maybe_child_bounty.as_mut().ok_or(BountiesError::::InvalidIndex)?; // Ensure child-bounty is in expected state. 
@@ -473,7 +473,7 @@ pub mod pallet { parent_bounty_id, child_bounty_id, |maybe_child_bounty| -> DispatchResult { - let mut child_bounty = + let child_bounty = maybe_child_bounty.as_mut().ok_or(BountiesError::::InvalidIndex)?; let slash_curator = |curator: &T::AccountId, @@ -591,7 +591,7 @@ pub mod pallet { parent_bounty_id, child_bounty_id, |maybe_child_bounty| -> DispatchResult { - let mut child_bounty = + let child_bounty = maybe_child_bounty.as_mut().ok_or(BountiesError::::InvalidIndex)?; // Ensure child-bounty is in active state. @@ -816,7 +816,7 @@ impl Pallet { fn ensure_bounty_active( bounty_id: BountyIndex, - ) -> Result<(T::AccountId, T::BlockNumber), DispatchError> { + ) -> Result<(T::AccountId, BlockNumberFor), DispatchError> { let parent_bounty = pallet_bounties::Pallet::::bounties(bounty_id) .ok_or(BountiesError::::InvalidIndex)?; if let BountyStatus::Active { curator, update_due } = parent_bounty.get_status() { diff --git a/frame/child-bounties/src/tests.rs b/frame/child-bounties/src/tests.rs index a936312aec868..24a6410f29f78 100644 --- a/frame/child-bounties/src/tests.rs +++ b/frame/child-bounties/src/tests.rs @@ -23,9 +23,7 @@ use super::*; use crate as pallet_child_bounties; use frame_support::{ - assert_noop, assert_ok, - pallet_prelude::GenesisBuild, - parameter_types, + assert_noop, assert_ok, parameter_types, traits::{ConstU32, ConstU64, OnInitialize}, weights::Weight, PalletId, @@ -33,27 +31,22 @@ use frame_support::{ use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BadOrigin, BlakeTwo256, IdentityLookup}, - Perbill, Permill, TokenError, + BuildStorage, Perbill, Permill, TokenError, }; use super::Event as ChildBountiesEvent; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; type BountiesError = pallet_bounties::Error; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + 
pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Bounties: pallet_bounties::{Pallet, Call, Storage, Event}, - Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event}, + Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event}, ChildBounties: pallet_child_bounties::{Pallet, Call, Storage, Event}, } ); @@ -72,14 +65,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u128; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -105,7 +97,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } parameter_types! { @@ -162,14 +154,16 @@ impl pallet_child_bounties::Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized at ED. 
balances: vec![(0, 100), (1, 98), (2, 1)], } .assimilate_storage(&mut t) .unwrap(); - GenesisBuild::::assimilate_storage(&pallet_treasury::GenesisConfig, &mut t).unwrap(); + pallet_treasury::GenesisConfig::::default() + .assimilate_storage(&mut t) + .unwrap(); t.into() } diff --git a/frame/child-bounties/src/weights.rs b/frame/child-bounties/src/weights.rs index be30e80a19f27..e4c1f238e88b7 100644 --- a/frame/child-bounties/src/weights.rs +++ b/frame/child-bounties/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_child_bounties //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_child_bounties +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_child_bounties. pub trait WeightInfo { @@ -76,10 +80,10 @@ impl WeightInfo for SubstrateWeight { /// The range of component `d` is `[0, 300]`. fn add_child_bounty(_d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `678` + // Measured: `712` // Estimated: `6196` - // Minimum execution time: 69_784_000 picoseconds. 
- Weight::from_parts(71_225_354, 6196) + // Minimum execution time: 69_805_000 picoseconds. + Weight::from_parts(73_216_717, 6196) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -91,10 +95,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `732` + // Measured: `766` // Estimated: `3642` - // Minimum execution time: 19_008_000 picoseconds. - Weight::from_parts(19_219_000, 3642) + // Minimum execution time: 18_190_000 picoseconds. + Weight::from_parts(18_932_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -106,10 +110,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `878` + // Measured: `912` // Estimated: `3642` - // Minimum execution time: 35_457_000 picoseconds. - Weight::from_parts(36_088_000, 3642) + // Minimum execution time: 35_035_000 picoseconds. + Weight::from_parts(35_975_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -121,10 +125,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `878` + // Measured: `912` // Estimated: `3642` - // Minimum execution time: 38_244_000 picoseconds. - Weight::from_parts(38_611_000, 3642) + // Minimum execution time: 37_636_000 picoseconds. 
+ Weight::from_parts(38_610_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -134,10 +138,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `775` + // Measured: `809` // Estimated: `3642` - // Minimum execution time: 22_762_000 picoseconds. - Weight::from_parts(23_249_000, 3642) + // Minimum execution time: 22_457_000 picoseconds. + Weight::from_parts(23_691_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -151,10 +155,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `648` + // Measured: `682` // Estimated: `8799` - // Minimum execution time: 112_019_000 picoseconds. - Weight::from_parts(113_190_000, 8799) + // Minimum execution time: 118_272_000 picoseconds. + Weight::from_parts(121_646_000, 8799) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -172,10 +176,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `978` + // Measured: `1012` // Estimated: `6196` - // Minimum execution time: 72_477_000 picoseconds. - Weight::from_parts(73_573_000, 6196) + // Minimum execution time: 75_717_000 picoseconds. 
+ Weight::from_parts(77_837_000, 6196) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -193,10 +197,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `1165` + // Measured: `1199` // Estimated: `8799` - // Minimum execution time: 91_049_000 picoseconds. - Weight::from_parts(91_874_000, 8799) + // Minimum execution time: 94_215_000 picoseconds. + Weight::from_parts(97_017_000, 8799) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -219,10 +223,10 @@ impl WeightInfo for () { /// The range of component `d` is `[0, 300]`. fn add_child_bounty(_d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `678` + // Measured: `712` // Estimated: `6196` - // Minimum execution time: 69_784_000 picoseconds. - Weight::from_parts(71_225_354, 6196) + // Minimum execution time: 69_805_000 picoseconds. + Weight::from_parts(73_216_717, 6196) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -234,10 +238,10 @@ impl WeightInfo for () { /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `732` + // Measured: `766` // Estimated: `3642` - // Minimum execution time: 19_008_000 picoseconds. - Weight::from_parts(19_219_000, 3642) + // Minimum execution time: 18_190_000 picoseconds. 
+ Weight::from_parts(18_932_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -249,10 +253,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `878` + // Measured: `912` // Estimated: `3642` - // Minimum execution time: 35_457_000 picoseconds. - Weight::from_parts(36_088_000, 3642) + // Minimum execution time: 35_035_000 picoseconds. + Weight::from_parts(35_975_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -264,10 +268,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `878` + // Measured: `912` // Estimated: `3642` - // Minimum execution time: 38_244_000 picoseconds. - Weight::from_parts(38_611_000, 3642) + // Minimum execution time: 37_636_000 picoseconds. + Weight::from_parts(38_610_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -277,10 +281,10 @@ impl WeightInfo for () { /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `775` + // Measured: `809` // Estimated: `3642` - // Minimum execution time: 22_762_000 picoseconds. - Weight::from_parts(23_249_000, 3642) + // Minimum execution time: 22_457_000 picoseconds. 
+ Weight::from_parts(23_691_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -294,10 +298,10 @@ impl WeightInfo for () { /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `648` + // Measured: `682` // Estimated: `8799` - // Minimum execution time: 112_019_000 picoseconds. - Weight::from_parts(113_190_000, 8799) + // Minimum execution time: 118_272_000 picoseconds. + Weight::from_parts(121_646_000, 8799) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -315,10 +319,10 @@ impl WeightInfo for () { /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `978` + // Measured: `1012` // Estimated: `6196` - // Minimum execution time: 72_477_000 picoseconds. - Weight::from_parts(73_573_000, 6196) + // Minimum execution time: 75_717_000 picoseconds. + Weight::from_parts(77_837_000, 6196) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -336,10 +340,10 @@ impl WeightInfo for () { /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `1165` + // Measured: `1199` // Estimated: `8799` - // Minimum execution time: 91_049_000 picoseconds. - Weight::from_parts(91_874_000, 8799) + // Minimum execution time: 94_215_000 picoseconds. 
+ Weight::from_parts(97_017_000, 8799) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index 7ae8817474a47..2ca447434f4b0 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -13,16 +13,16 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [features] default = ["std"] @@ -44,4 +44,8 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -try-runtime = ["frame-support/try-runtime"] 
+try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index bcd203c3894a3..503d725105309 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -24,7 +24,9 @@ use sp_runtime::traits::Bounded; use sp_std::mem::size_of; use frame_benchmarking::v1::{account, benchmarks_instance_pallet, whitelisted_caller}; -use frame_system::{Call as SystemCall, Pallet as System, RawOrigin as SystemOrigin}; +use frame_system::{ + pallet_prelude::BlockNumberFor, Call as SystemCall, Pallet as System, RawOrigin as SystemOrigin, +}; const SEED: u32 = 0; @@ -131,7 +133,7 @@ benchmarks_instance_pallet! { let proposal_hash = T::Hashing::hash_of(&proposal); // Note that execution fails due to mis-matched origin assert_last_event::( - Event::MemberExecuted { proposal_hash, result: Err(DispatchError::BadOrigin) }.into() + Event::MemberExecuted { proposal_hash, result: Ok(()) }.into() ); } @@ -162,7 +164,7 @@ benchmarks_instance_pallet! { let proposal_hash = T::Hashing::hash_of(&proposal); // Note that execution fails due to mis-matched origin assert_last_event::( - Event::Executed { proposal_hash, result: Err(DispatchError::BadOrigin) }.into() + Event::Executed { proposal_hash, result: Ok(()) }.into() ); } @@ -441,7 +443,7 @@ benchmarks_instance_pallet! { verify { // The last proposal is removed. assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Err(DispatchError::BadOrigin) }.into()); + assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Ok(()) }.into()); } close_disapproved { @@ -516,7 +518,7 @@ benchmarks_instance_pallet! 
{ false, )?; - System::::set_block_number(T::BlockNumber::max_value()); + System::::set_block_number(BlockNumberFor::::max_value()); assert_eq!(Collective::::proposals().len(), p as usize); // Prime nay will close it as disapproved @@ -588,14 +590,14 @@ benchmarks_instance_pallet! { } // caller is prime, prime already votes aye by creating the proposal - System::::set_block_number(T::BlockNumber::max_value()); + System::::set_block_number(BlockNumberFor::::max_value()); assert_eq!(Collective::::proposals().len(), p as usize); // Prime aye will close it as approved }: close(SystemOrigin::Signed(caller), last_hash, p - 1, Weight::MAX, bytes_in_storage) verify { assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Err(DispatchError::BadOrigin) }.into()); + assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Ok(()) }.into()); } disapprove_proposal { @@ -637,7 +639,7 @@ benchmarks_instance_pallet! 
{ last_hash = T::Hashing::hash_of(&proposal); } - System::::set_block_number(T::BlockNumber::max_value()); + System::::set_block_number(BlockNumberFor::::max_value()); assert_eq!(Collective::::proposals().len(), p as usize); }: _(SystemOrigin::Root, last_hash) diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 12917f5bd6e34..1084091173934 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -53,13 +53,17 @@ use frame_support::{ DispatchError, DispatchResult, DispatchResultWithPostInfo, Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo, }, - ensure, + ensure, impl_ensure_origin_with_arg_ignoring_arg, traits::{ - Backing, ChangeMembers, EnsureOrigin, Get, GetBacking, InitializeMembers, StorageVersion, + Backing, ChangeMembers, EnsureOrigin, EnsureOriginWithArg, Get, GetBacking, + InitializeMembers, StorageVersion, }, weights::Weight, }; +#[cfg(any(feature = "try-runtime", test))] +use sp_runtime::TryRuntimeError; + #[cfg(test)] mod tests; @@ -197,7 +201,7 @@ pub mod pallet { + IsType<::RuntimeEvent>; /// The time-out for council motions. - type MotionDuration: Get; + type MotionDuration: Get>; /// Maximum number of proposals allowed to be active in parallel. type MaxProposals: Get; @@ -226,12 +230,13 @@ pub mod pallet { #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig, I: 'static = ()> { + #[serde(skip)] pub phantom: PhantomData, pub members: Vec, } #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { + impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) { use sp_std::collections::btree_set::BTreeSet; let members_set: BTreeSet<_> = self.members.iter().collect(); @@ -269,7 +274,7 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn voting)] pub type Voting, I: 'static = ()> = - StorageMap<_, Identity, T::Hash, Votes, OptionQuery>; + StorageMap<_, Identity, T::Hash, Votes>, OptionQuery>; /// Proposals so far. 
#[pallet::storage] @@ -341,14 +346,15 @@ pub mod pallet { WrongProposalWeight, /// The given length bound for the proposal was too low. WrongProposalLength, + /// Prime account is not a member + PrimeAccountNotMember, } #[pallet::hooks] impl, I: 'static> Hooks> for Pallet { #[cfg(feature = "try-runtime")] - fn try_state(_n: BlockNumberFor) -> Result<(), &'static str> { - Self::do_try_state()?; - Ok(()) + fn try_state(_n: BlockNumberFor) -> Result<(), TryRuntimeError> { + Self::do_try_state() } } @@ -413,6 +419,9 @@ pub mod pallet { old.len(), ); } + if let Some(p) = &prime { + ensure!(new_members.contains(p), Error::::PrimeAccountNotMember); + } let mut new_members = new_members; new_members.sort(); >::set_members_sorted(&new_members, &old); @@ -967,77 +976,78 @@ impl, I: 'static> Pallet { /// Looking at prime account: /// * The prime account must be a member of the collective. #[cfg(any(feature = "try-runtime", test))] - fn do_try_state() -> DispatchResult { - Self::proposals().into_iter().try_for_each(|proposal| -> DispatchResult { - ensure!( - Self::proposal_of(proposal).is_some(), - DispatchError::Other( + fn do_try_state() -> Result<(), TryRuntimeError> { + Self::proposals() + .into_iter() + .try_for_each(|proposal| -> Result<(), TryRuntimeError> { + ensure!( + Self::proposal_of(proposal).is_some(), "Proposal hash from `Proposals` is not found inside the `ProposalOf` mapping." 
- ) - ); - Ok(()) - })?; + ); + Ok(()) + })?; ensure!( Self::proposals().into_iter().count() <= Self::proposal_count() as usize, - DispatchError::Other("The actual number of proposals is greater than `ProposalCount`") + "The actual number of proposals is greater than `ProposalCount`" ); ensure!( Self::proposals().into_iter().count() == >::iter_keys().count(), - DispatchError::Other("Proposal count inside `Proposals` is not equal to the proposal count in `ProposalOf`") + "Proposal count inside `Proposals` is not equal to the proposal count in `ProposalOf`" ); - Self::proposals().into_iter().try_for_each(|proposal| -> DispatchResult { - if let Some(votes) = Self::voting(proposal) { - let ayes = votes.ayes.len(); - let nays = votes.nays.len(); - - ensure!( - ayes.saturating_add(nays) <= T::MaxMembers::get() as usize, - DispatchError::Other("The sum of ayes and nays is greater than `MaxMembers`") - ); - } - Ok(()) - })?; + Self::proposals() + .into_iter() + .try_for_each(|proposal| -> Result<(), TryRuntimeError> { + if let Some(votes) = Self::voting(proposal) { + let ayes = votes.ayes.len(); + let nays = votes.nays.len(); + + ensure!( + ayes.saturating_add(nays) <= T::MaxMembers::get() as usize, + "The sum of ayes and nays is greater than `MaxMembers`" + ); + } + Ok(()) + })?; let mut proposal_indices = vec![]; - Self::proposals().into_iter().try_for_each(|proposal| -> DispatchResult { - if let Some(votes) = Self::voting(proposal) { - let proposal_index = votes.index; - ensure!( - !proposal_indices.contains(&proposal_index), - DispatchError::Other("The proposal index is not unique.") - ); - proposal_indices.push(proposal_index); - } - Ok(()) - })?; + Self::proposals() + .into_iter() + .try_for_each(|proposal| -> Result<(), TryRuntimeError> { + if let Some(votes) = Self::voting(proposal) { + let proposal_index = votes.index; + ensure!( + !proposal_indices.contains(&proposal_index), + "The proposal index is not unique." 
+ ); + proposal_indices.push(proposal_index); + } + Ok(()) + })?; - >::iter_keys().try_for_each(|proposal_hash| -> DispatchResult { - ensure!( - Self::proposals().contains(&proposal_hash), - DispatchError::Other( + >::iter_keys().try_for_each( + |proposal_hash| -> Result<(), TryRuntimeError> { + ensure!( + Self::proposals().contains(&proposal_hash), "`Proposals` doesn't contain the proposal hash from the `Voting` storage map." - ) - ); - Ok(()) - })?; + ); + Ok(()) + }, + )?; ensure!( Self::members().len() <= T::MaxMembers::get() as usize, - DispatchError::Other("The member count is greater than `MaxMembers`.") + "The member count is greater than `MaxMembers`." ); ensure!( Self::members().windows(2).all(|members| members[0] <= members[1]), - DispatchError::Other("The members are not sorted by value.") + "The members are not sorted by value." ); if let Some(prime) = Self::prime() { - ensure!( - Self::members().contains(&prime), - DispatchError::Other("Prime account is not a member.") - ); + ensure!(Self::members().contains(&prime), "Prime account is not a member."); } Ok(()) @@ -1151,6 +1161,12 @@ impl< } } +impl_ensure_origin_with_arg_ignoring_arg! { + impl< { O: .., I: 'static, AccountId: Decode, T } > + EnsureOriginWithArg for EnsureMember + {} +} + pub struct EnsureMembers(PhantomData<(AccountId, I)>); impl< O: Into, O>> + From>, @@ -1173,6 +1189,12 @@ impl< } } +impl_ensure_origin_with_arg_ignoring_arg! { + impl< { O: .., I: 'static, const N: u32, AccountId, T } > + EnsureOriginWithArg for EnsureMembers + {} +} + pub struct EnsureProportionMoreThan( PhantomData<(AccountId, I)>, ); @@ -1198,6 +1220,12 @@ impl< } } +impl_ensure_origin_with_arg_ignoring_arg! 
{ + impl< { O: .., I: 'static, const N: u32, const D: u32, AccountId, T } > + EnsureOriginWithArg for EnsureProportionMoreThan + {} +} + pub struct EnsureProportionAtLeast( PhantomData<(AccountId, I)>, ); @@ -1222,3 +1250,9 @@ impl< Ok(O::from(RawOrigin::Members(0u32, 0u32))) } } + +impl_ensure_origin_with_arg_ignoring_arg! { + impl< { O: .., I: 'static, const N: u32, const D: u32, AccountId, T } > + EnsureOriginWithArg for EnsureProportionAtLeast + {} +} diff --git a/frame/collective/src/tests.rs b/frame/collective/src/tests.rs index 99aa7a57e1604..86b85e07a8bd9 100644 --- a/frame/collective/src/tests.rs +++ b/frame/collective/src/tests.rs @@ -21,7 +21,7 @@ use frame_support::{ assert_noop, assert_ok, dispatch::Pays, parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild, StorageVersion}, + traits::{ConstU32, ConstU64, StorageVersion}, Hashable, }; use frame_system::{EnsureRoot, EventRecord, Phase}; @@ -36,10 +36,7 @@ pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub enum Test { System: frame_system::{Pallet, Call, Event}, Collective: pallet_collective::::{Pallet, Call, Event, Origin, Config}, @@ -99,14 +96,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -177,7 +173,7 @@ impl ExtBuilder { } pub fn build(self) -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = GenesisConfig { + let mut ext: sp_io::TestExternalities = 
RuntimeGenesisConfig { collective: pallet_collective::GenesisConfig { members: self.collective_members, phantom: Default::default(), @@ -236,6 +232,25 @@ fn initialize_members_sorts_members() { }); } +#[test] +fn set_members_with_prime_works() { + ExtBuilder::default().build_and_execute(|| { + let members = vec![1, 2, 3]; + assert_ok!(Collective::set_members( + RuntimeOrigin::root(), + members.clone(), + Some(3), + MaxMembers::get() + )); + assert_eq!(Collective::members(), members.clone()); + assert_eq!(Collective::prime(), Some(3)); + assert_noop!( + Collective::set_members(RuntimeOrigin::root(), members, Some(4), MaxMembers::get()), + Error::::PrimeAccountNotMember + ); + }); +} + #[test] fn proposal_weight_limit_works() { ExtBuilder::default().build_and_execute(|| { diff --git a/frame/collective/src/weights.rs b/frame/collective/src/weights.rs index bf739daca0931..eece6a006b8f2 100644 --- a/frame/collective/src/weights.rs +++ b/frame/collective/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_collective //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_collective +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_collective. pub trait WeightInfo { @@ -77,13 +81,13 @@ impl WeightInfo for SubstrateWeight { fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` - // Estimated: `15861 + m * (1967 ±23) + p * (4332 ±23)` - // Minimum execution time: 19_398_000 picoseconds. - Weight::from_parts(19_542_000, 15861) - // Standard Error: 71_395 - .saturating_add(Weight::from_parts(5_630_062, 0).saturating_mul(m.into())) - // Standard Error: 71_395 - .saturating_add(Weight::from_parts(8_634_133, 0).saturating_mul(p.into())) + // Estimated: `15861 + m * (1967 ±24) + p * (4332 ±24)` + // Minimum execution time: 17_506_000 picoseconds. + Weight::from_parts(17_767_000, 15861) + // Standard Error: 60_220 + .saturating_add(Weight::from_parts(4_374_805, 0).saturating_mul(m.into())) + // Standard Error: 60_220 + .saturating_add(Weight::from_parts(8_398_316, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -99,12 +103,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `202 + m * (32 ±0)` // Estimated: `1688 + m * (32 ±0)` - // Minimum execution time: 17_579_000 picoseconds. 
- Weight::from_parts(16_874_624, 1688) - // Standard Error: 34 - .saturating_add(Weight::from_parts(1_617, 0).saturating_mul(b.into())) - // Standard Error: 353 - .saturating_add(Weight::from_parts(19_759, 0).saturating_mul(m.into())) + // Minimum execution time: 16_203_000 picoseconds. + Weight::from_parts(15_348_267, 1688) + // Standard Error: 37 + .saturating_add(Weight::from_parts(1_766, 0).saturating_mul(b.into())) + // Standard Error: 382 + .saturating_add(Weight::from_parts(15_765, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -118,12 +122,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `202 + m * (32 ±0)` // Estimated: `3668 + m * (32 ±0)` - // Minimum execution time: 20_339_000 picoseconds. - Weight::from_parts(19_534_549, 3668) - // Standard Error: 45 - .saturating_add(Weight::from_parts(1_636, 0).saturating_mul(b.into())) - // Standard Error: 469 - .saturating_add(Weight::from_parts(28_178, 0).saturating_mul(m.into())) + // Minimum execution time: 18_642_000 picoseconds. + Weight::from_parts(17_708_609, 3668) + // Standard Error: 58 + .saturating_add(Weight::from_parts(2_285, 0).saturating_mul(b.into())) + // Standard Error: 598 + .saturating_add(Weight::from_parts(30_454, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -144,14 +148,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `492 + m * (32 ±0) + p * (36 ±0)` // Estimated: `3884 + m * (33 ±0) + p * (36 ±0)` - // Minimum execution time: 27_793_000 picoseconds. 
- Weight::from_parts(28_095_462, 3884) - // Standard Error: 82 - .saturating_add(Weight::from_parts(2_646, 0).saturating_mul(b.into())) - // Standard Error: 861 - .saturating_add(Weight::from_parts(22_332, 0).saturating_mul(m.into())) - // Standard Error: 850 - .saturating_add(Weight::from_parts(121_560, 0).saturating_mul(p.into())) + // Minimum execution time: 27_067_000 picoseconds. + Weight::from_parts(25_456_964, 3884) + // Standard Error: 112 + .saturating_add(Weight::from_parts(3_773, 0).saturating_mul(b.into())) + // Standard Error: 1_177 + .saturating_add(Weight::from_parts(32_783, 0).saturating_mul(m.into())) + // Standard Error: 1_162 + .saturating_add(Weight::from_parts(194_388, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) @@ -166,10 +170,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `941 + m * (64 ±0)` // Estimated: `4405 + m * (64 ±0)` - // Minimum execution time: 23_096_000 picoseconds. - Weight::from_parts(23_793_304, 4405) - // Standard Error: 675 - .saturating_add(Weight::from_parts(51_741, 0).saturating_mul(m.into())) + // Minimum execution time: 26_055_000 picoseconds. + Weight::from_parts(27_251_907, 4405) + // Standard Error: 1_008 + .saturating_add(Weight::from_parts(65_947, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -188,12 +192,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `530 + m * (64 ±0) + p * (36 ±0)` // Estimated: `3975 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 29_635_000 picoseconds. 
- Weight::from_parts(29_574_124, 3975) - // Standard Error: 755 - .saturating_add(Weight::from_parts(29_126, 0).saturating_mul(m.into())) - // Standard Error: 737 - .saturating_add(Weight::from_parts(123_438, 0).saturating_mul(p.into())) + // Minimum execution time: 28_363_000 picoseconds. + Weight::from_parts(28_733_464, 3975) + // Standard Error: 1_275 + .saturating_add(Weight::from_parts(43_236, 0).saturating_mul(m.into())) + // Standard Error: 1_244 + .saturating_add(Weight::from_parts(180_187, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) @@ -214,14 +218,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `832 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` // Estimated: `4149 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 41_934_000 picoseconds. - Weight::from_parts(44_022_379, 4149) - // Standard Error: 105 - .saturating_add(Weight::from_parts(2_266, 0).saturating_mul(b.into())) - // Standard Error: 1_112 - .saturating_add(Weight::from_parts(18_074, 0).saturating_mul(m.into())) - // Standard Error: 1_084 - .saturating_add(Weight::from_parts(132_405, 0).saturating_mul(p.into())) + // Minimum execution time: 40_391_000 picoseconds. 
+ Weight::from_parts(42_695_215, 4149) + // Standard Error: 167 + .saturating_add(Weight::from_parts(3_622, 0).saturating_mul(b.into())) + // Standard Error: 1_772 + .saturating_add(Weight::from_parts(33_830, 0).saturating_mul(m.into())) + // Standard Error: 1_727 + .saturating_add(Weight::from_parts(205_374, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -244,12 +248,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `550 + m * (64 ±0) + p * (36 ±0)` // Estimated: `3995 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 33_146_000 picoseconds. - Weight::from_parts(31_957_128, 3995) - // Standard Error: 2_321 - .saturating_add(Weight::from_parts(31_272, 0).saturating_mul(m.into())) - // Standard Error: 2_264 - .saturating_add(Weight::from_parts(156_129, 0).saturating_mul(p.into())) + // Minimum execution time: 31_368_000 picoseconds. + Weight::from_parts(32_141_835, 3995) + // Standard Error: 1_451 + .saturating_add(Weight::from_parts(36_372, 0).saturating_mul(m.into())) + // Standard Error: 1_415 + .saturating_add(Weight::from_parts(210_635, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) @@ -272,14 +276,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `852 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` // Estimated: `4169 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 44_278_000 picoseconds. 
- Weight::from_parts(46_039_907, 4169) - // Standard Error: 100 - .saturating_add(Weight::from_parts(2_257, 0).saturating_mul(b.into())) - // Standard Error: 1_062 - .saturating_add(Weight::from_parts(25_055, 0).saturating_mul(m.into())) - // Standard Error: 1_035 - .saturating_add(Weight::from_parts(136_282, 0).saturating_mul(p.into())) + // Minimum execution time: 43_271_000 picoseconds. + Weight::from_parts(45_495_648, 4169) + // Standard Error: 174 + .saturating_add(Weight::from_parts(3_034, 0).saturating_mul(b.into())) + // Standard Error: 1_840 + .saturating_add(Weight::from_parts(42_209, 0).saturating_mul(m.into())) + // Standard Error: 1_793 + .saturating_add(Weight::from_parts(207_525, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -297,10 +301,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `359 + p * (32 ±0)` // Estimated: `1844 + p * (32 ±0)` - // Minimum execution time: 16_500_000 picoseconds. - Weight::from_parts(18_376_538, 1844) - // Standard Error: 755 - .saturating_add(Weight::from_parts(113_189, 0).saturating_mul(p.into())) + // Minimum execution time: 15_170_000 picoseconds. + Weight::from_parts(17_567_243, 1844) + // Standard Error: 1_430 + .saturating_add(Weight::from_parts(169_040, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) @@ -323,13 +327,13 @@ impl WeightInfo for () { fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` - // Estimated: `15861 + m * (1967 ±23) + p * (4332 ±23)` - // Minimum execution time: 19_398_000 picoseconds. 
- Weight::from_parts(19_542_000, 15861) - // Standard Error: 71_395 - .saturating_add(Weight::from_parts(5_630_062, 0).saturating_mul(m.into())) - // Standard Error: 71_395 - .saturating_add(Weight::from_parts(8_634_133, 0).saturating_mul(p.into())) + // Estimated: `15861 + m * (1967 ±24) + p * (4332 ±24)` + // Minimum execution time: 17_506_000 picoseconds. + Weight::from_parts(17_767_000, 15861) + // Standard Error: 60_220 + .saturating_add(Weight::from_parts(4_374_805, 0).saturating_mul(m.into())) + // Standard Error: 60_220 + .saturating_add(Weight::from_parts(8_398_316, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -345,12 +349,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `202 + m * (32 ±0)` // Estimated: `1688 + m * (32 ±0)` - // Minimum execution time: 17_579_000 picoseconds. - Weight::from_parts(16_874_624, 1688) - // Standard Error: 34 - .saturating_add(Weight::from_parts(1_617, 0).saturating_mul(b.into())) - // Standard Error: 353 - .saturating_add(Weight::from_parts(19_759, 0).saturating_mul(m.into())) + // Minimum execution time: 16_203_000 picoseconds. + Weight::from_parts(15_348_267, 1688) + // Standard Error: 37 + .saturating_add(Weight::from_parts(1_766, 0).saturating_mul(b.into())) + // Standard Error: 382 + .saturating_add(Weight::from_parts(15_765, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -364,12 +368,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `202 + m * (32 ±0)` // Estimated: `3668 + m * (32 ±0)` - // Minimum execution time: 20_339_000 picoseconds. 
- Weight::from_parts(19_534_549, 3668) - // Standard Error: 45 - .saturating_add(Weight::from_parts(1_636, 0).saturating_mul(b.into())) - // Standard Error: 469 - .saturating_add(Weight::from_parts(28_178, 0).saturating_mul(m.into())) + // Minimum execution time: 18_642_000 picoseconds. + Weight::from_parts(17_708_609, 3668) + // Standard Error: 58 + .saturating_add(Weight::from_parts(2_285, 0).saturating_mul(b.into())) + // Standard Error: 598 + .saturating_add(Weight::from_parts(30_454, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -390,14 +394,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `492 + m * (32 ±0) + p * (36 ±0)` // Estimated: `3884 + m * (33 ±0) + p * (36 ±0)` - // Minimum execution time: 27_793_000 picoseconds. - Weight::from_parts(28_095_462, 3884) - // Standard Error: 82 - .saturating_add(Weight::from_parts(2_646, 0).saturating_mul(b.into())) - // Standard Error: 861 - .saturating_add(Weight::from_parts(22_332, 0).saturating_mul(m.into())) - // Standard Error: 850 - .saturating_add(Weight::from_parts(121_560, 0).saturating_mul(p.into())) + // Minimum execution time: 27_067_000 picoseconds. + Weight::from_parts(25_456_964, 3884) + // Standard Error: 112 + .saturating_add(Weight::from_parts(3_773, 0).saturating_mul(b.into())) + // Standard Error: 1_177 + .saturating_add(Weight::from_parts(32_783, 0).saturating_mul(m.into())) + // Standard Error: 1_162 + .saturating_add(Weight::from_parts(194_388, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) @@ -412,10 +416,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `941 + m * (64 ±0)` // Estimated: `4405 + m * (64 ±0)` - // Minimum execution time: 23_096_000 picoseconds. 
- Weight::from_parts(23_793_304, 4405) - // Standard Error: 675 - .saturating_add(Weight::from_parts(51_741, 0).saturating_mul(m.into())) + // Minimum execution time: 26_055_000 picoseconds. + Weight::from_parts(27_251_907, 4405) + // Standard Error: 1_008 + .saturating_add(Weight::from_parts(65_947, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -434,12 +438,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `530 + m * (64 ±0) + p * (36 ±0)` // Estimated: `3975 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 29_635_000 picoseconds. - Weight::from_parts(29_574_124, 3975) - // Standard Error: 755 - .saturating_add(Weight::from_parts(29_126, 0).saturating_mul(m.into())) - // Standard Error: 737 - .saturating_add(Weight::from_parts(123_438, 0).saturating_mul(p.into())) + // Minimum execution time: 28_363_000 picoseconds. + Weight::from_parts(28_733_464, 3975) + // Standard Error: 1_275 + .saturating_add(Weight::from_parts(43_236, 0).saturating_mul(m.into())) + // Standard Error: 1_244 + .saturating_add(Weight::from_parts(180_187, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) @@ -460,14 +464,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `832 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` // Estimated: `4149 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 41_934_000 picoseconds. 
- Weight::from_parts(44_022_379, 4149) - // Standard Error: 105 - .saturating_add(Weight::from_parts(2_266, 0).saturating_mul(b.into())) - // Standard Error: 1_112 - .saturating_add(Weight::from_parts(18_074, 0).saturating_mul(m.into())) - // Standard Error: 1_084 - .saturating_add(Weight::from_parts(132_405, 0).saturating_mul(p.into())) + // Minimum execution time: 40_391_000 picoseconds. + Weight::from_parts(42_695_215, 4149) + // Standard Error: 167 + .saturating_add(Weight::from_parts(3_622, 0).saturating_mul(b.into())) + // Standard Error: 1_772 + .saturating_add(Weight::from_parts(33_830, 0).saturating_mul(m.into())) + // Standard Error: 1_727 + .saturating_add(Weight::from_parts(205_374, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -490,12 +494,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `550 + m * (64 ±0) + p * (36 ±0)` // Estimated: `3995 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 33_146_000 picoseconds. - Weight::from_parts(31_957_128, 3995) - // Standard Error: 2_321 - .saturating_add(Weight::from_parts(31_272, 0).saturating_mul(m.into())) - // Standard Error: 2_264 - .saturating_add(Weight::from_parts(156_129, 0).saturating_mul(p.into())) + // Minimum execution time: 31_368_000 picoseconds. 
+ Weight::from_parts(32_141_835, 3995) + // Standard Error: 1_451 + .saturating_add(Weight::from_parts(36_372, 0).saturating_mul(m.into())) + // Standard Error: 1_415 + .saturating_add(Weight::from_parts(210_635, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) @@ -518,14 +522,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `852 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` // Estimated: `4169 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 44_278_000 picoseconds. - Weight::from_parts(46_039_907, 4169) - // Standard Error: 100 - .saturating_add(Weight::from_parts(2_257, 0).saturating_mul(b.into())) - // Standard Error: 1_062 - .saturating_add(Weight::from_parts(25_055, 0).saturating_mul(m.into())) - // Standard Error: 1_035 - .saturating_add(Weight::from_parts(136_282, 0).saturating_mul(p.into())) + // Minimum execution time: 43_271_000 picoseconds. + Weight::from_parts(45_495_648, 4169) + // Standard Error: 174 + .saturating_add(Weight::from_parts(3_034, 0).saturating_mul(b.into())) + // Standard Error: 1_840 + .saturating_add(Weight::from_parts(42_209, 0).saturating_mul(m.into())) + // Standard Error: 1_793 + .saturating_add(Weight::from_parts(207_525, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -543,10 +547,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `359 + p * (32 ±0)` // Estimated: `1844 + p * (32 ±0)` - // Minimum execution time: 16_500_000 picoseconds. - Weight::from_parts(18_376_538, 1844) - // Standard Error: 755 - .saturating_add(Weight::from_parts(113_189, 0).saturating_mul(p.into())) + // Minimum execution time: 15_170_000 picoseconds. 
+ Weight::from_parts(17_567_243, 1844) + // Standard Error: 1_430 + .saturating_add(Weight::from_parts(169_040, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index edb6d294cfcd5..a5c309adc97bc 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -3,6 +3,7 @@ name = "pallet-contracts" version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2021" +build = "build.rs" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" @@ -15,22 +16,21 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bitflags = "1.3" -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len", ] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4", default-features = false } -wasm-instrument = { version = "0.4", default-features = false } serde = { version = "1", optional = true, features = ["derive"] } smallvec = { version = "1", default-features = false, features = [ "const_generics", ] } -wasmi = { version = "0.28", default-features = false } -wasmparser = { package = "wasmparser-nostd", version = "0.100", default-features = false } +wasmi = { version = "0.30", default-features = false } impl-trait-for-tuples = "0.2" -# Only used in benchmarking to generate random contract code +# Only used in benchmarking to generate contract code +wasm-instrument = { version = "0.4", optional = true, default-features = false } rand = { version = "0.8", optional = true, default-features = false } rand_pcg = { version = "0.3", optional = true } @@ -39,16 +39,17 @@ 
environmental = { version = "1.1.4", default-features = false } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -pallet-contracts-primitives = { version = "7.0.0", default-features = false, path = "primitives" } +pallet-balances = { version = "4.0.0-dev", path = "../balances", optional = true, default-features = false } +pallet-contracts-primitives = { version = "24.0.0", default-features = false, path = "primitives" } pallet-contracts-proc-macro = { version = "4.0.0-dev", path = "proc-macro" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -array-bytes = "4.1" +array-bytes = "6.1" assert_matches = "1" env_logger = "0.9" pretty_assertions = "1" @@ -60,7 +61,7 @@ pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } pallet-insecure-randomness-collective-flip = { version = "4.0.0-dev", path = "../insecure-randomness-collective-flip" } pallet-utility = { version = "4.0.0-dev", path = "../utility" } pallet-proxy = { version = 
"4.0.0-dev", path = "../proxy" } -sp-keystore = { version = "0.13.0", path = "../../primitives/keystore" } +sp-keystore = { version = "0.27.0", path = "../../primitives/keystore" } [features] default = ["std"] @@ -81,12 +82,36 @@ std = [ "pallet-contracts-proc-macro/full", "log/std", "rand/std", - "wasmparser/std", "environmental/std", + "pallet-balances?/std", + "pallet-insecure-randomness-collective-flip/std", + "pallet-proxy/std", + "pallet-timestamp/std", + "pallet-utility/std", + "sp-api/std", + "sp-keystore/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "rand", "rand_pcg", + "wasm-instrument", + "pallet-balances/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-insecure-randomness-collective-flip/try-runtime", + "pallet-proxy/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-utility/try-runtime", + "sp-runtime/try-runtime", +] +unsafe-debug = [] diff --git a/frame/contracts/README.md b/frame/contracts/README.md index 13c5e7253c1d8..aeb30cef32fc8 100644 --- a/frame/contracts/README.md +++ b/frame/contracts/README.md @@ -9,12 +9,12 @@ The Contracts module provides functionality for the runtime to deploy and execut ## Overview -This module extends accounts based on the `Currency` trait to have smart-contract functionality. It can -be used with other modules that implement accounts based on `Currency`. These "smart-contract accounts" +This module extends accounts based on the [`frame_support::traits::fungible`] traits to have smart-contract functionality. It can +be used with other modules that implement accounts based on [`frame_support::traits::fungible`]. 
These "smart-contract accounts" have the ability to instantiate smart-contracts and make calls to other contract and non-contract accounts. -The smart-contract code is stored once in a `code_cache`, and later retrievable via its `code_hash`. -This means that multiple smart-contracts can be instantiated from the same `code_cache`, without replicating +The smart-contract code is stored once, and later retrievable via its `code_hash`. +This means that multiple smart-contracts can be instantiated from the same `code`, without replicating the code each time. When a smart-contract is called, its associated code is retrieved via the code hash and gets executed. @@ -24,18 +24,17 @@ or call other smart-contracts. Finally, when an account is reaped, its associated code and storage of the smart-contract account will also be deleted. -### Gas +### Weight -Senders must specify a gas limit with every call, as all instructions invoked by the smart-contract require gas. -Unused gas is refunded after the call, regardless of the execution outcome. +Senders must specify a [`Weight`](https://paritytech.github.io/substrate/master/sp_weights/struct.Weight.html) limit with every call, as all instructions invoked by the smart-contract require weight. +Unused weight is refunded after the call, regardless of the execution outcome. -If the gas limit is reached, then all calls and state changes (including balance transfers) are only -reverted at the current call's contract level. For example, if contract A calls B and B runs out of gas mid-call, +If the weight limit is reached, then all calls and state changes (including balance transfers) are only +reverted at the current call's contract level. For example, if contract A calls B and B runs out of weight mid-call, then all of B's calls are reverted. Assuming correct error handling by contract A, A's other calls and state changes still persist. 
-One gas is equivalent to one [weight](https://docs.substrate.io/v3/runtime/weights-and-fees) -which is defined as one picosecond of execution time on the runtime's reference machine. +One `ref_time` `Weight` is defined as one picosecond of execution time on the runtime's reference machine. ### Revert Behaviour @@ -43,29 +42,26 @@ Contract call failures are not cascading. When failures occur in a sub-call, the and the call will only revert at the specific contract level. For example, if contract A calls contract B, and B fails, A can decide how to handle that failure, either proceeding or reverting A's changes. -### Offchain Execution +### Off-chain Execution In general, a contract execution needs to be deterministic so that all nodes come to the same conclusion when executing it. To that end we disallow any instructions that could cause indeterminism. Most notable are any floating point arithmetic. That said, sometimes contracts are executed off-chain and hence are not subject to consensus. If code is only executed by a single node and implicitly trusted by other actors is such a case. Trusted execution environments -come to mind. To that end we allow the execution of indeterminstic code for offchain usages +come to mind. To that end we allow the execution of indeterminstic code for off-chain usages with the following constraints: 1. No contract can ever be instantiated from an indeterministic code. The only way to execute the code is to use a delegate call from a deterministic contract. -2. The code that wants to use this feature needs to depend on `pallet-contracts` and use `bare_call` +2. The code that wants to use this feature needs to depend on `pallet-contracts` and use [`bare_call()`](https://paritytech.github.io/substrate/master/pallet_contracts/pallet/struct.Pallet.html#method.bare_call) directly. This makes sure that by default `pallet-contracts` does not expose any indeterminism. 
-## How to use - -When setting up the `Schedule` for your runtime make sure to set `InstructionWeights::fallback` -to a non zero value. The default is `0` and prevents the upload of any non deterministic code. +#### How to use An indeterministic code can be deployed on-chain by passing `Determinism::Relaxed` -to `upload_code`. A deterministic contract can then delegate call into it if and only if it -is ran by using `bare_call` and passing `Determinism::Relaxed` to it. **Never use +to [`upload_code()`](https://paritytech.github.io/substrate/master/pallet_contracts/pallet/struct.Pallet.html#method.upload_code). A deterministic contract can then delegate call into it if and only if it +is ran by using [`bare_call()`](https://paritytech.github.io/substrate/master/pallet_contracts/pallet/struct.Pallet.html#method.bare_call) and passing [`Determinism::Relaxed`](https://paritytech.github.io/substrate/master/pallet_contracts/enum.Determinism.html#variant.Relaxed) to it. **Never use this argument when the contract is called from an on-chain transaction.** ## Interface @@ -99,24 +95,22 @@ Each contract is one WebAssembly module that looks like this: ``` The documentation of all importable functions can be found -[here](https://github.com/paritytech/substrate/blob/master/frame/contracts/src/wasm/runtime.rs). -Look for the `define_env!` macro invocation. +[here](https://paritytech.github.io/substrate/master/pallet_contracts/api_doc/trait.Current.html). ## Usage This module executes WebAssembly smart contracts. These can potentially be written in any language -that compiles to web assembly. However, using a language that specifically targets this module -will make things a lot easier. One such language is [`ink!`](https://use.ink) -which is an [`eDSL`](https://wiki.haskell.org/Embedded_domain_specific_language) that enables -writing WebAssembly based smart contracts in the Rust programming language. +that compiles to Wasm. 
However, using a language that specifically targets this module +will make things a lot easier. One such language is [`ink!`](https://use.ink). It enables +writing WebAssembly-based smart-contracts in the Rust programming language. ## Debugging -Contracts can emit messages to the client when called as RPC through the `seal_debug_message` +Contracts can emit messages to the client when called as RPC through the [`debug_message`](https://paritytech.github.io/substrate/master/pallet_contracts/api_doc/trait.Current.html#tymethod.debug_message) API. This is exposed in [ink!](https://use.ink) via [`ink_env::debug_message()`](https://paritytech.github.io/ink/ink_env/fn.debug_message.html). -Those messages are gathered into an internal buffer and send to the RPC client. +Those messages are gathered into an internal buffer and sent to the RPC client. It is up the the individual client if and how those messages are presented to the user. This buffer is also printed as a debug message. In order to see these messages on the node @@ -154,11 +148,11 @@ this pallet contains the concept of an unstable interface. Akin to the rust nigh it allows us to add new interfaces but mark them as unstable so that contract languages can experiment with them and give feedback before we stabilize those. -In order to access interfaces marked as `#[unstable]` in `runtime.rs` one need to set -`pallet_contracts::Config::UnsafeUnstableInterface` to `ConstU32`. It should be obvious +In order to access interfaces marked as `#[unstable]` in [`runtime.rs`](src/wasm/runtime.rs) one need to set +`pallet_contracts::Config::UnsafeUnstableInterface` to `ConstU32`. **It should be obvious that any production runtime should never be compiled with this feature: In addition to be subject to change or removal those interfaces might not have proper weights associated with -them and are therefore considered unsafe. +them and are therefore considered unsafe**. 
New interfaces are generally added as unstable and might go through several iterations before they are promoted to a stable interface. diff --git a/frame/contracts/build.rs b/frame/contracts/build.rs new file mode 100644 index 0000000000000..7817ace9c98e2 --- /dev/null +++ b/frame/contracts/build.rs @@ -0,0 +1,73 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::io::Write; + +/// Get the latest migration version. +/// +/// Find the highest version number from the available migration files. +/// Each migration file should follow the naming convention `vXX.rs`, where `XX` is the version +/// number. +fn get_latest_version() -> u16 { + std::fs::read_dir("src/migration") + .expect("Folder `src/migration` not found.") + .filter_map(|entry| { + let file_name = entry.as_ref().ok()?.file_name(); + let file_name = file_name.to_str()?; + if file_name.starts_with('v') && file_name.ends_with(".rs") { + let version = &file_name[1..&file_name.len() - 3]; + let version = version.parse::().ok()?; + + // Ensure that the version matches the one defined in the file. 
+ let path = entry.unwrap().path(); + let file_content = std::fs::read_to_string(&path).ok()?; + assert!( + file_content.contains(&format!("const VERSION: u16 = {}", version)), + "Invalid MigrationStep::VERSION in {:?}", + path + ); + + return Some(version) + } + None + }) + .max() + .expect("Failed to find any files matching the 'src/migration/vxx.rs' pattern.") +} + +/// Generates a module that exposes the latest migration version, and the benchmark migrations type. +fn main() -> Result<(), Box> { + let out_dir = std::env::var("OUT_DIR")?; + let path = std::path::Path::new(&out_dir).join("migration_codegen.rs"); + let mut f = std::fs::File::create(&path)?; + let version = get_latest_version(); + write!( + f, + " + pub mod codegen {{ + use crate::NoopMigration; + /// The latest migration version, pulled from the latest migration file. + pub const LATEST_MIGRATION_VERSION: u16 = {version}; + /// The Migration Steps used for benchmarking the migration framework. + pub type BenchMigrations = (NoopMigration<{}>, NoopMigration<{version}>); + }}", + version - 1, + )?; + + println!("cargo:rerun-if-changed=src/migration"); + Ok(()) +} diff --git a/frame/contracts/fixtures/add_remove_delegate_dependency.wat b/frame/contracts/fixtures/add_remove_delegate_dependency.wat new file mode 100644 index 0000000000000..ef456b6d620a3 --- /dev/null +++ b/frame/contracts/fixtures/add_remove_delegate_dependency.wat @@ -0,0 +1,111 @@ +;; This contract tests the behavior of adding / removing delegate_dependencies when delegate calling into a contract. 
+(module + (import "seal0" "add_delegate_dependency" (func $add_delegate_dependency (param i32))) + (import "seal0" "remove_delegate_dependency" (func $remove_delegate_dependency (param i32))) + (import "seal0" "input" (func $input (param i32 i32))) + (import "seal1" "terminate" (func $terminate (param i32))) + (import "seal0" "delegate_call" (func $delegate_call (param i32 i32 i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + ;; [100, 132) Address of Alice + (data (i32.const 100) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + ;; This function loads input data and performs the action specified. + ;; The first 4 bytes of the input specify the action to perform. + ;; The next 32 bytes specify the code hash to use when calling add_delegate_dependency or remove_delegate_dependency. + ;; Actions are: + ;; 1: call add_delegate_dependency + ;; 2: call remove_delegate_dependency. + ;; 3: call terminate. + ;; Any other value is a no-op. + (func $load_input + (local $action i32) + (local $code_hash_ptr i32) + + ;; Store available input size at offset 0. + (i32.store (i32.const 0) (i32.const 512)) + + ;; Read input data. + (call $input (i32.const 4) (i32.const 0)) + + ;; Input data layout. + ;; [0..4) - size of the call + ;; [4..8) - action to perform + ;; [8..42) - code hash of the callee + (set_local $action (i32.load (i32.const 4))) + (set_local $code_hash_ptr (i32.const 8)) + + ;; Assert input size == 36 (4 for action + 32 for code_hash). + (call $assert + (i32.eq + (i32.load (i32.const 0)) + (i32.const 36) + ) + ) + + ;; Call add_delegate_dependency when action == 1. + (if (i32.eq (get_local $action) (i32.const 1)) + (then + (call $add_delegate_dependency (get_local $code_hash_ptr)) + ) + (else) + ) + + ;; Call remove_delegate_dependency when action == 2. 
+ (if (i32.eq (get_local $action) (i32.const 2)) + (then + (call $remove_delegate_dependency + (get_local $code_hash_ptr) + ) + ) + (else) + ) + + ;; Call terminate when action == 3. + (if (i32.eq (get_local $action) (i32.const 3)) + (then + (call $terminate + (i32.const 100) ;; Pointer to beneficiary address + ) + (unreachable) ;; terminate never returns + ) + (else) + ) + ) + + (func (export "deploy") + (call $load_input) + ) + + (func (export "call") + (call $load_input) + + ;; Delegate call into passed code hash. + (call $assert + (i32.eq + (call $delegate_call + (i32.const 0) ;; Set no call flags. + (i32.const 8) ;; Pointer to "callee" code_hash. + (i32.const 0) ;; Input is ignored. + (i32.const 0) ;; Length of the input. + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output. + (i32.const 0) ;; Length is ignored in this case. + ) + (i32.const 0) + ) + ) + ) + +) diff --git a/frame/contracts/fixtures/chain_extension.wat b/frame/contracts/fixtures/chain_extension.wat index 7cc7335052e90..670f8e70172e7 100644 --- a/frame/contracts/fixtures/chain_extension.wat +++ b/frame/contracts/fixtures/chain_extension.wat @@ -1,7 +1,7 @@ ;; Call chain extension by passing through input and output of this contract (module - (import "seal0" "seal_call_chain_extension" - (func $seal_call_chain_extension (param i32 i32 i32 i32 i32) (result i32)) + (import "seal0" "call_chain_extension" + (func $call_chain_extension (param i32 i32 i32 i32 i32) (result i32)) ) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) @@ -30,7 +30,7 @@ (call $seal_input (i32.const 4) (i32.const 0)) ;; the chain extension passes through the input and returns it as output - (call $seal_call_chain_extension + (call $call_chain_extension (i32.load (i32.const 4)) ;; id (i32.const 4) ;; input_ptr (i32.load (i32.const 0)) ;; input_len diff --git a/frame/contracts/fixtures/dummy.wat 
b/frame/contracts/fixtures/dummy.wat index 0aeefbcb7ebfe..a6435e49df222 100644 --- a/frame/contracts/fixtures/dummy.wat +++ b/frame/contracts/fixtures/dummy.wat @@ -1,5 +1,6 @@ ;; A valid contract which does nothing at all (module + (import "env" "memory" (memory 1 1)) (func (export "deploy")) (func (export "call")) ) diff --git a/frame/contracts/fixtures/float_instruction.wat b/frame/contracts/fixtures/float_instruction.wat index c19b5c12cdcec..efa6b9de52de6 100644 --- a/frame/contracts/fixtures/float_instruction.wat +++ b/frame/contracts/fixtures/float_instruction.wat @@ -1,5 +1,6 @@ ;; Module that contains a float instruction which is illegal in deterministic mode (module + (import "env" "memory" (memory 1 1)) (func (export "call") f32.const 1 drop diff --git a/frame/contracts/fixtures/invalid_contract.wat b/frame/contracts/fixtures/invalid_contract_no_call.wat similarity index 68% rename from frame/contracts/fixtures/invalid_contract.wat rename to frame/contracts/fixtures/invalid_contract_no_call.wat index 085569000c559..34f7c99ba85e4 100644 --- a/frame/contracts/fixtures/invalid_contract.wat +++ b/frame/contracts/fixtures/invalid_contract_no_call.wat @@ -1,4 +1,5 @@ ;; Valid module but missing the call function (module + (import "env" "memory" (memory 1 1)) (func (export "deploy")) ) diff --git a/frame/contracts/fixtures/invalid_contract_no_memory.wat b/frame/contracts/fixtures/invalid_contract_no_memory.wat new file mode 100644 index 0000000000000..0aeefbcb7ebfe --- /dev/null +++ b/frame/contracts/fixtures/invalid_contract_no_memory.wat @@ -0,0 +1,5 @@ +;; A valid contract which does nothing at all +(module + (func (export "deploy")) + (func (export "call")) +) diff --git a/frame/contracts/fixtures/run_out_of_gas.wat b/frame/contracts/fixtures/run_out_of_gas.wat index 52ee92539fd52..fe53e92c4fa84 100644 --- a/frame/contracts/fixtures/run_out_of_gas.wat +++ b/frame/contracts/fixtures/run_out_of_gas.wat @@ -1,4 +1,5 @@ (module + (import "env" "memory" (memory 1 
1)) (func (export "call") (loop $inf (br $inf)) ;; just run out of gas (unreachable) diff --git a/frame/contracts/fixtures/seal_input_noop.wat b/frame/contracts/fixtures/seal_input_noop.wat new file mode 100644 index 0000000000000..7b5a1e32af4d6 --- /dev/null +++ b/frame/contracts/fixtures/seal_input_noop.wat @@ -0,0 +1,14 @@ +;; Everything prepared for the host function call, but no call is performed. +(module + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; [0, 8) buffer to write input + + ;; [8, 12) size of the input buffer + (data (i32.const 8) "\04") + + (func (export "call")) + + (func (export "deploy")) +) diff --git a/frame/contracts/fixtures/seal_input_once.wat b/frame/contracts/fixtures/seal_input_once.wat new file mode 100644 index 0000000000000..919a03a9b6903 --- /dev/null +++ b/frame/contracts/fixtures/seal_input_once.wat @@ -0,0 +1,22 @@ +;; Stores a value of the passed size. The host function is called once. +(module + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; [0, 8) buffer to write input + + ;; [8, 12) size of the input buffer + (data (i32.const 8) "\04") + + (func (export "call") + ;; instructions to consume engine fuel + (drop + (i32.const 42) + ) + + (call $seal_input (i32.const 0) (i32.const 8)) + + ) + + (func (export "deploy")) +) diff --git a/frame/contracts/fixtures/seal_input_twice.wat b/frame/contracts/fixtures/seal_input_twice.wat new file mode 100644 index 0000000000000..3a8be814efb04 --- /dev/null +++ b/frame/contracts/fixtures/seal_input_twice.wat @@ -0,0 +1,28 @@ +;; Stores a value of the passed size. The host function is called twice. 
+(module + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; [0, 8) buffer to write input + + ;; [8, 12) size of the input buffer + (data (i32.const 8) "\04") + + (func (export "call") + ;; instructions to consume engine fuel + (drop + (i32.const 42) + ) + + (call $seal_input (i32.const 0) (i32.const 8)) + + ;; instructions to consume engine fuel + (drop + (i32.const 42) + ) + + (call $seal_input (i32.const 0) (i32.const 8)) + ) + + (func (export "deploy")) +) diff --git a/frame/contracts/primitives/Cargo.toml b/frame/contracts/primitives/Cargo.toml index b643f03a9d511..facfe34e1bc50 100644 --- a/frame/contracts/primitives/Cargo.toml +++ b/frame/contracts/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-primitives" -version = "7.0.0" +version = "24.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -15,12 +15,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bitflags = "1.0" scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } # Substrate Dependencies (This crate should not rely on frame) -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-weights = { version = "4.0.0", default-features = false, path = "../../../primitives/weights" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-weights = { version = "20.0.0", default-features = false, path = "../../../primitives/weights" } [features] default = 
["std"] @@ -29,4 +29,5 @@ std = [ "sp-runtime/std", "sp-std/std", "scale-info/std", + "sp-weights/std" ] diff --git a/frame/contracts/primitives/src/lib.rs b/frame/contracts/primitives/src/lib.rs index cc21e29e67b3d..c33149285004b 100644 --- a/frame/contracts/primitives/src/lib.rs +++ b/frame/contracts/primitives/src/lib.rs @@ -20,7 +20,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use bitflags::bitflags; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime::{ traits::{Saturating, Zero}, @@ -39,7 +39,7 @@ use sp_weights::Weight; /// It has been extended to include `events` at the end of the struct while not bumping the /// `ContractsApi` version. Therefore when SCALE decoding a `ContractResult` its trailing data /// should be ignored to avoid any potential compatibility issues. -#[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct ContractResult { /// How much weight was consumed during execution. pub gas_consumed: Weight, @@ -99,12 +99,14 @@ pub type CodeUploadResult = pub type GetStorageResult = Result>, ContractAccessError>; /// The possible errors that can happen querying the storage of a contract. -#[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, MaxEncodedLen, RuntimeDebug, TypeInfo)] pub enum ContractAccessError { /// The given address doesn't point to a contract. DoesntExist, /// Storage key cannot be decoded from the provided input data. KeyDecodingFailed, + /// Storage is migrating. Try again later. + MigrationInProgress, } bitflags! { @@ -117,7 +119,7 @@ bitflags! { } /// Output of a contract call or instantiation which ran to completion. 
-#[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct ExecReturnValue { /// Flags passed along by `seal_return`. Empty when `seal_return` was never called. pub flags: ReturnFlags, @@ -133,7 +135,7 @@ impl ExecReturnValue { } /// The result of a successful contract instantiation. -#[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct InstantiateReturnValue { /// The output of the called constructor. pub result: ExecReturnValue, @@ -142,7 +144,7 @@ pub struct InstantiateReturnValue { } /// The result of successfully uploading a contract. -#[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, RuntimeDebug, TypeInfo)] pub struct CodeUploadReturnValue { /// The key under which the new code is stored. pub code_hash: CodeHash, @@ -151,7 +153,7 @@ pub struct CodeUploadReturnValue { } /// Reference to an existing code hash or a new wasm module. -#[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum Code { /// A wasm module as raw bytes. Upload(Vec), @@ -159,14 +161,10 @@ pub enum Code { Existing(Hash), } -impl>, Hash> From for Code { - fn from(from: T) -> Self { - Code::Upload(from.into()) - } -} - /// The amount of balance that was either charged or refunded in order to pay for storage. -#[derive(Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, Clone, TypeInfo)] +#[derive( + Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, RuntimeDebug, TypeInfo, +)] pub enum StorageDeposit { /// The transaction reduced storage consumption. 
/// diff --git a/frame/contracts/proc-macro/Cargo.toml b/frame/contracts/proc-macro/Cargo.toml index c700d1e335639..8a63875f2a980 100644 --- a/frame/contracts/proc-macro/Cargo.toml +++ b/frame/contracts/proc-macro/Cargo.toml @@ -16,8 +16,8 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" -quote = "1.0.26" -syn = { version = "2.0.14", features = ["full"] } +quote = "1.0.28" +syn = { version = "2.0.16", features = ["full"] } [dev-dependencies] diff --git a/frame/contracts/proc-macro/src/lib.rs b/frame/contracts/proc-macro/src/lib.rs index a6a8187bc8aaa..b31403c29adfd 100644 --- a/frame/contracts/proc-macro/src/lib.rs +++ b/frame/contracts/proc-macro/src/lib.rs @@ -624,19 +624,15 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) let trace_fmt_str = format!("{}::{}({}) = {{:?}}\n", module, name, params_fmt_str); quote! { + let result = #body; if ::log::log_enabled!(target: "runtime::contracts::strace", ::log::Level::Trace) { - let result = #body; - { use sp_std::fmt::Write; let mut w = sp_std::Writer::default(); let _ = core::write!(&mut w, #trace_fmt_str, #( #trace_fmt_args, )* result); let msg = core::str::from_utf8(&w.inner()).unwrap_or_default(); ctx.ext().append_debug_buffer(msg); - } - result - } else { - #body } + result } }; @@ -661,7 +657,7 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) ::core::unreachable!() } } }; - let map_err = if expand_blocks { + let into_host = if expand_blocks { quote! { |reason| { ::wasmi::core::Trap::from(reason) @@ -677,6 +673,43 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) } else { quote! { #[allow(unused_variables)] } }; + let sync_gas_before = if expand_blocks { + quote! { + // Gas left in the gas meter right before switching to engine execution. 
+ let __gas_before__ = { + let engine_consumed_total = + __caller__.fuel_consumed().expect("Fuel metering is enabled; qed"); + let gas_meter = __caller__.data_mut().ext().gas_meter_mut(); + gas_meter + .charge_fuel(engine_consumed_total) + .map_err(TrapReason::from) + .map_err(#into_host)? + .ref_time() + }; + } + } else { + quote! { } + }; + // Gas left in the gas meter right after returning from engine execution. + let sync_gas_after = if expand_blocks { + quote! { + let mut gas_after = __caller__.data_mut().ext().gas_meter().gas_left().ref_time(); + let mut host_consumed = __gas_before__.saturating_sub(gas_after); + // Possible undercharge of at max 1 fuel here, if host consumed less than `instruction_weights.base` + // Not a problem though, as soon as host accounts its spent gas properly. + let fuel_consumed = host_consumed + .checked_div(__caller__.data_mut().ext().schedule().instruction_weights.base as u64) + .ok_or(Error::::InvalidSchedule) + .map_err(TrapReason::from) + .map_err(#into_host)?; + __caller__ + .consume_fuel(fuel_consumed) + .map_err(|_| TrapReason::from(Error::::OutOfGas)) + .map_err(#into_host)?; + } + } else { + quote! { } + }; quote! { // We need to allow all interfaces when runtime benchmarks are performed because @@ -688,10 +721,11 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) { #allow_unused linker.define(#module, #name, ::wasmi::Func::wrap(&mut*store, |mut __caller__: ::wasmi::Caller<#host_state>, #( #params, )*| -> #wasm_output { + #sync_gas_before let mut func = #inner; - func() - .map_err(#map_err) - .map(::core::convert::Into::into) + let result = func().map_err(#into_host).map(::core::convert::Into::into); + #sync_gas_after + result }))?; } } diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index 07f24f385b035..2f50611b41c21 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -24,18 +24,15 @@ //! 
we define this simple definition of a contract that can be passed to `create_code` that //! compiles it down into a `WasmModule` that can be used as a contract's code. -use crate::{Config, Determinism}; +use crate::Config; use frame_support::traits::Get; use sp_runtime::traits::Hash; use sp_std::{borrow::ToOwned, prelude::*}; -use wasm_instrument::{ - gas_metering, - parity_wasm::{ - builder, - elements::{ - self, BlockType, CustomSection, External, FuncBody, Instruction, Instructions, Module, - Section, ValueType, - }, +use wasm_instrument::parity_wasm::{ + builder, + elements::{ + self, BlockType, CustomSection, External, FuncBody, Instruction, Instructions, Module, + Section, ValueType, }, }; @@ -125,7 +122,7 @@ impl From for WasmModule { // internal functions start at that offset. let func_offset = u32::try_from(def.imported_functions.len()).unwrap(); - // Every contract must export "deploy" and "call" functions + // Every contract must export "deploy" and "call" functions. let mut contract = builder::module() // deploy function (first internal function) .function() @@ -166,15 +163,16 @@ impl From for WasmModule { } // Grant access to linear memory. - if let Some(memory) = &def.memory { - contract = contract - .import() - .module("env") - .field("memory") - .external() - .memory(memory.min_pages, Some(memory.max_pages)) - .build(); - } + // Every contract module is required to have an imported memory. + // If no memory is specified in the passed ModuleDefenition, then + // default to (1, 1). + let (init, max) = if let Some(memory) = &def.memory { + (memory.min_pages, Some(memory.max_pages)) + } else { + (1, Some(1)) + }; + + contract = contract.import().path("env", "memory").external().memory(init, max).build(); // Import supervisor functions. They start with idx 0. for func in def.imported_functions { @@ -240,15 +238,9 @@ impl From for WasmModule { } impl WasmModule { - /// Uses the supplied wasm module and instruments it when requested. 
- pub fn instrumented(code: &[u8], inject_gas: bool) -> Self { - let module = { - let mut module = Module::from_bytes(code).unwrap(); - if inject_gas { - module = inject_gas_metering::(module); - } - module - }; + /// Uses the supplied wasm module. + pub fn from_code(code: &[u8]) -> Self { + let module = Module::from_bytes(code).unwrap(); let limits = *module .import_section() .unwrap() @@ -366,37 +358,13 @@ impl WasmModule { } .into() } - - pub fn unary_instr(instr: Instruction, repeat: u32) -> Self { - use body::DynInstr::{RandomI64Repeated, Regular}; - ModuleDefinition { - call_body: Some(body::repeated_dyn( - repeat, - vec![RandomI64Repeated(1), Regular(instr), Regular(Instruction::Drop)], - )), - ..Default::default() - } - .into() - } - - pub fn binary_instr(instr: Instruction, repeat: u32) -> Self { - use body::DynInstr::{RandomI64Repeated, Regular}; - ModuleDefinition { - call_body: Some(body::repeated_dyn( - repeat, - vec![RandomI64Repeated(2), Regular(instr), Regular(Instruction::Drop)], - )), - ..Default::default() - } - .into() - } } /// Mechanisms to generate a function body that can be used inside a `ModuleDefinition`. pub mod body { use super::*; - /// When generating contract code by repeating a wasm sequence, it's sometimes necessary + /// When generating contract code by repeating a Wasm sequence, it's sometimes necessary /// to change those instructions on each repetition. The variants of this enum describe /// various ways in which this can happen. pub enum DynInstr { @@ -405,31 +373,8 @@ pub mod body { /// Insert a I32Const with incrementing value for each insertion. /// (start_at, increment_by) Counter(u32, u32), - /// Insert a I32Const with a random value in [low, high) not divisible by two. - /// (low, high) - RandomUnaligned(u32, u32), - /// Insert a I32Const with a random value in [low, high). - /// (low, high) - RandomI32(i32, i32), - /// Insert the specified amount of I32Const with a random value. 
- RandomI32Repeated(usize), /// Insert the specified amount of I64Const with a random value. RandomI64Repeated(usize), - /// Insert a GetLocal with a random offset in [low, high). - /// (low, high) - RandomGetLocal(u32, u32), - /// Insert a SetLocal with a random offset in [low, high). - /// (low, high) - RandomSetLocal(u32, u32), - /// Insert a TeeLocal with a random offset in [low, high). - /// (low, high) - RandomTeeLocal(u32, u32), - /// Insert a GetGlobal with a random offset in [low, high). - /// (low, high) - RandomGetGlobal(u32, u32), - /// Insert a SetGlobal with a random offset in [low, high). - /// (low, high) - RandomSetGlobal(u32, u32), } pub fn plain(instructions: Vec) -> FuncBody { @@ -466,53 +411,16 @@ pub mod body { *offset += *increment_by; vec![Instruction::I32Const(current as i32)] }, - DynInstr::RandomUnaligned(low, high) => { - let unaligned = rng.gen_range(*low..*high) | 1; - vec![Instruction::I32Const(unaligned as i32)] - }, - DynInstr::RandomI32(low, high) => { - vec![Instruction::I32Const(rng.gen_range(*low..*high))] - }, - DynInstr::RandomI32Repeated(num) => - (&mut rng).sample_iter(Standard).take(*num).map(Instruction::I32Const).collect(), DynInstr::RandomI64Repeated(num) => (&mut rng).sample_iter(Standard).take(*num).map(Instruction::I64Const).collect(), - DynInstr::RandomGetLocal(low, high) => { - vec![Instruction::GetLocal(rng.gen_range(*low..*high))] - }, - DynInstr::RandomSetLocal(low, high) => { - vec![Instruction::SetLocal(rng.gen_range(*low..*high))] - }, - DynInstr::RandomTeeLocal(low, high) => { - vec![Instruction::TeeLocal(rng.gen_range(*low..*high))] - }, - DynInstr::RandomGetGlobal(low, high) => { - vec![Instruction::GetGlobal(rng.gen_range(*low..*high))] - }, - DynInstr::RandomSetGlobal(low, high) => { - vec![Instruction::SetGlobal(rng.gen_range(*low..*high))] - }, }) .chain(sp_std::iter::once(Instruction::End)) .collect(); FuncBody::new(Vec::new(), Instructions::new(body)) } - - /// Replace the locals of the supplied 
`body` with `num` i64 locals. - pub fn inject_locals(body: &mut FuncBody, num: u32) { - use self::elements::Local; - *body.locals_mut() = vec![Local::new(num, ValueType::I64)]; - } } /// The maximum amount of pages any contract is allowed to have according to the current `Schedule`. pub fn max_pages() -> u32 { T::Schedule::get().limits.memory_pages } - -fn inject_gas_metering(module: Module) -> Module { - let schedule = T::Schedule::get(); - let gas_rules = schedule.rules(Determinism::Enforced); - let backend = gas_metering::host_function::Injector::new("seal0", "gas"); - gas_metering::inject(module, backend, &gas_rules).unwrap() -} diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index fa9417a59042d..84e46e47b609f 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -21,7 +21,6 @@ mod code; mod sandbox; - use self::{ code::{ body::{self, DynInstr::*}, @@ -31,19 +30,23 @@ use self::{ }; use crate::{ exec::{AccountIdOf, Key}, + migration::{codegen::LATEST_MIGRATION_VERSION, v09, v10, v11, v12, v13, v14, MigrationStep}, wasm::CallFlags, Pallet as Contracts, *, }; use codec::{Encode, MaxEncodedLen}; use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; -use frame_support::weights::Weight; -use frame_system::RawOrigin; -use sp_runtime::{ - traits::{Bounded, Hash}, - Perbill, +use frame_support::{ + self, + pallet_prelude::StorageVersion, + traits::{fungible::InspectHold, Currency}, + weights::Weight, }; +use frame_system::RawOrigin; +use pallet_balances; +use sp_runtime::traits::{Bounded, Hash}; use sp_std::prelude::*; -use wasm_instrument::parity_wasm::elements::{BlockType, BrTableData, Instruction, ValueType}; +use wasm_instrument::parity_wasm::elements::{BlockType, Instruction, ValueType}; /// How many runs we do per API benchmark. 
/// @@ -66,8 +69,9 @@ struct Contract { value: BalanceOf, } -impl Contract +impl Contract where + T: Config + pallet_balances::Config, as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, { /// Create new contract and use a default account id as instantiator. @@ -91,7 +95,7 @@ where data: Vec, ) -> Result, &'static str> { let value = Pallet::::min_balance(); - T::Currency::make_free_balance_be(&caller, caller_funding::()); + T::Currency::set_balance(&caller, caller_funding::()); let salt = vec![0xff]; let addr = Contracts::::contract_address(&caller, &module.hash, &data, &salt); @@ -157,27 +161,25 @@ where /// Set the balance of the contract to the supplied amount. fn set_balance(&self, balance: BalanceOf) { - T::Currency::make_free_balance_be(&self.account_id, balance); + T::Currency::set_balance(&self.account_id, balance); } /// Returns `true` iff all storage entries related to code storage exist. fn code_exists(hash: &CodeHash) -> bool { - >::contains_key(hash) && - >::contains_key(&hash) && - >::contains_key(&hash) + >::contains_key(hash) && >::contains_key(&hash) } /// Returns `true` iff no storage entry related to code storage exist. fn code_removed(hash: &CodeHash) -> bool { - !>::contains_key(hash) && - !>::contains_key(&hash) && - !>::contains_key(&hash) + !>::contains_key(hash) && !>::contains_key(&hash) } } /// The funding that each account that either calls or instantiates contracts is funded with. fn caller_funding() -> BalanceOf { - BalanceOf::::max_value() / 2u32.into() + // Minting can overflow, so we can't abuse of the funding. This value happens to be big enough, + // but not too big to make the total supply overflow. + BalanceOf::::max_value() / 10_000u32.into() } /// Load the specified contract file from disk by including it into the runtime. @@ -201,6 +203,9 @@ macro_rules! load_benchmark { benchmarks! 
{ where_clause { where as codec::HasCompact>::Type: Clone + Eq + PartialEq + sp_std::fmt::Debug + scale_info::TypeInfo + codec::Encode, + T: Config + pallet_balances::Config, + BalanceOf: From< as Currency>::Balance>, + as Currency>::Balance: From>, } // The base weight consumed on processing contracts deletion queue. @@ -219,19 +224,136 @@ benchmarks! { ContractInfo::::process_deletion_queue_batch(Weight::MAX) } - // This benchmarks the additional weight that is charged when a contract is executed the - // first time after a new schedule was deployed: For every new schedule a contract needs - // to re-run the instrumentation once. + // This benchmarks the v9 migration step (update codeStorage). #[pov_mode = Measured] - reinstrument { - let c in 0 .. Perbill::from_percent(49).mul_ceil(T::MaxCodeLen::get()); - let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call); - Contracts::::store_code_raw(code, whitelisted_caller())?; - let schedule = T::Schedule::get(); - let mut gas_meter = GasMeter::new(Weight::MAX); - let mut module = PrefabWasmModule::from_storage(hash, &schedule, &mut gas_meter)?; + v9_migration_step { + let c in 0 .. T::MaxCodeLen::get(); + v09::store_old_dummy_code::(c as usize); + let mut m = v09::Migration::::default(); + }: { + m.step(); + } + + // This benchmarks the v10 migration step (use dedicated deposit_account). + #[pov_mode = Measured] + v10_migration_step { + let contract = >::with_caller( + whitelisted_caller(), WasmModule::dummy(), vec![], + )?; + + v10::store_old_contract_info::>(contract.account_id.clone(), contract.info()?); + let mut m = v10::Migration::>::default(); + }: { + m.step(); + } + + // This benchmarks the v11 migration step (Don't rely on reserved balances keeping an account alive). + #[pov_mode = Measured] + v11_migration_step { + let k in 0 .. 
1024; + v11::fill_old_queue::(k as usize); + let mut m = v11::Migration::::default(); + }: { + m.step(); + } + + // This benchmarks the v12 migration step (Move `OwnerInfo` to `CodeInfo`, + // add `determinism` field to the latter, clear `CodeStorage` + // and repay deposits). + #[pov_mode = Measured] + v12_migration_step { + let c in 0 .. T::MaxCodeLen::get(); + v12::store_old_dummy_code::< + T, + pallet_balances::Pallet + >(c as usize, account::("account", 0, 0)); + let mut m = v12::Migration::>::default(); + }: { + m.step(); + } + + // This benchmarks the v13 migration step (Add delegate_dependencies field). + #[pov_mode = Measured] + v13_migration_step { + let contract = >::with_caller( + whitelisted_caller(), WasmModule::dummy(), vec![], + )?; + + v13::store_old_contract_info::(contract.account_id.clone(), contract.info()?); + let mut m = v13::Migration::::default(); + }: { + m.step(); + } + + // This benchmarks the v14 migration step (Move code owners' reserved balance to be held instead). + #[pov_mode = Measured] + v14_migration_step { + let account = account::("account", 0, 0); + T::Currency::set_balance(&account, caller_funding::()); + v14::store_dummy_code::>(account); + let mut m = v14::Migration::>::default(); }: { - Contracts::::reinstrument_module(&mut module, &schedule)?; + m.step(); + } + + // This benchmarks the weight of executing Migration::migrate to execute a noop migration. 
+ #[pov_mode = Measured] + migration_noop { + let version = LATEST_MIGRATION_VERSION; + assert_eq!(StorageVersion::get::>(), version); + }: { + Migration::::migrate(Weight::MAX) + } verify { + assert_eq!(StorageVersion::get::>(), version); + } + + // This benchmarks the weight of dispatching migrate to execute 1 `NoopMigraton` + #[pov_mode = Measured] + migrate { + let latest_version = LATEST_MIGRATION_VERSION; + StorageVersion::new(latest_version - 2).put::>(); + as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade(); + let caller: T::AccountId = whitelisted_caller(); + let origin = RawOrigin::Signed(caller.clone()); + }: _(origin, Weight::MAX) + verify { + assert_eq!(StorageVersion::get::>(), latest_version - 1); + } + + // This benchmarks the weight of running on_runtime_upgrade when there are no migration in progress. + #[pov_mode = Measured] + on_runtime_upgrade_noop { + let latest_version = LATEST_MIGRATION_VERSION; + assert_eq!(StorageVersion::get::>(), latest_version); + }: { + as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade() + } verify { + assert!(MigrationInProgress::::get().is_none()); + } + + // This benchmarks the weight of running on_runtime_upgrade when there is a migration in progress. + #[pov_mode = Measured] + on_runtime_upgrade_in_progress { + let latest_version = LATEST_MIGRATION_VERSION; + StorageVersion::new(latest_version - 2).put::>(); + let v = vec![42u8].try_into().ok(); + MigrationInProgress::::set(v.clone()); + }: { + as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade() + } verify { + assert!(MigrationInProgress::::get().is_some()); + assert_eq!(MigrationInProgress::::get(), v); + } + + // This benchmarks the weight of running on_runtime_upgrade when there is a migration to process. 
+ #[pov_mode = Measured] + on_runtime_upgrade { + let latest_version = LATEST_MIGRATION_VERSION; + StorageVersion::new(latest_version - 2).put::>(); + }: { + as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade() + } verify { + assert!(MigrationInProgress::::get().is_some()); } // This benchmarks the overhead of loading a code of size `c` byte from storage and into @@ -260,36 +382,31 @@ benchmarks! { // `c`: Size of the code in bytes. // `i`: Size of the input in bytes. // `s`: Size of the salt in bytes. - // - // # Note - // - // We cannot let `c` grow to the maximum code size because the code is not allowed - // to be larger than the maximum size **after instrumentation**. #[pov_mode = Measured] instantiate_with_code { - let c in 0 .. Perbill::from_percent(49).mul_ceil(T::MaxCodeLen::get()); + let c in 0 .. T::MaxCodeLen::get(); let i in 0 .. code::max_pages::() * 64 * 1024; let s in 0 .. code::max_pages::() * 64 * 1024; let input = vec![42u8; i as usize]; let salt = vec![42u8; s as usize]; let value = Pallet::::min_balance(); let caller = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, caller_funding::()); + T::Currency::set_balance(&caller, caller_funding::()); let WasmModule { code, hash, .. 
} = WasmModule::::sized(c, Location::Call); let origin = RawOrigin::Signed(caller.clone()); let addr = Contracts::::contract_address(&caller, &hash, &input, &salt); }: _(origin, value, Weight::MAX, None, code, input, salt) verify { let deposit_account = Contract::::address_info(&addr)?.deposit_account().clone(); - let deposit = T::Currency::free_balance(&deposit_account); + let deposit = T::Currency::balance(&deposit_account); // uploading the code reserves some balance in the callers account - let code_deposit = T::Currency::reserved_balance(&caller); + let code_deposit = T::Currency::total_balance_on_hold(&caller); assert_eq!( - T::Currency::free_balance(&caller), + T::Currency::balance(&caller), caller_funding::() - value - deposit - code_deposit - Pallet::::min_balance(), ); // contract has the full value - assert_eq!(T::Currency::free_balance(&addr), value + Pallet::::min_balance()); + assert_eq!(T::Currency::balance(&addr), value + Pallet::::min_balance()); } // Instantiate uses a dummy contract constructor to measure the overhead of the instantiate. @@ -303,7 +420,7 @@ benchmarks! { let salt = vec![42u8; s as usize]; let value = Pallet::::min_balance(); let caller = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, caller_funding::()); + T::Currency::set_balance(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::dummy(); let origin = RawOrigin::Signed(caller.clone()); let addr = Contracts::::contract_address(&caller, &hash, &input, &salt); @@ -311,14 +428,14 @@ benchmarks! 
{ }: _(origin, value, Weight::MAX, None, hash, input, salt) verify { let deposit_account = Contract::::address_info(&addr)?.deposit_account().clone(); - let deposit = T::Currency::free_balance(&deposit_account); + let deposit = T::Currency::balance(&deposit_account); // value was removed from the caller assert_eq!( - T::Currency::free_balance(&caller), + T::Currency::balance(&caller), caller_funding::() - value - deposit - Pallet::::min_balance(), ); // contract has the full value - assert_eq!(T::Currency::free_balance(&addr), value + Pallet::::min_balance()); + assert_eq!(T::Currency::balance(&addr), value + Pallet::::min_balance()); } // We just call a dummy contract to measure the overhead of the call extrinsic. @@ -338,18 +455,18 @@ benchmarks! { let value = Pallet::::min_balance(); let origin = RawOrigin::Signed(instance.caller.clone()); let callee = instance.addr.clone(); - let before = T::Currency::free_balance(&instance.account_id); - let before_deposit = T::Currency::free_balance(&deposit_account); + let before = T::Currency::balance(&instance.account_id); + let before_deposit = T::Currency::balance(&deposit_account); }: _(origin, callee, value, Weight::MAX, None, data) verify { - let deposit = T::Currency::free_balance(&deposit_account); + let deposit = T::Currency::balance(&deposit_account); // value and value transferred via call should be removed from the caller assert_eq!( - T::Currency::free_balance(&instance.caller), + T::Currency::balance(&instance.caller), caller_funding::() - instance.value - value - deposit - Pallet::::min_balance(), ); // contract should have received the value - assert_eq!(T::Currency::free_balance(&instance.account_id), before + value); + assert_eq!(T::Currency::balance(&instance.account_id), before + value); // contract should still exist instance.info()?; } @@ -357,42 +474,37 @@ benchmarks! { // This constructs a contract that is maximal expensive to instrument. // It creates a maximum number of metering blocks per byte. 
// `c`: Size of the code in bytes. - // - // # Note - // - // We cannot let `c` grow to the maximum code size because the code is not allowed - // to be larger than the maximum size **after instrumentation**. #[pov_mode = Measured] upload_code { - let c in 0 .. Perbill::from_percent(49).mul_ceil(T::MaxCodeLen::get()); + let c in 0 .. T::MaxCodeLen::get(); let caller = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, caller_funding::()); + T::Currency::set_balance(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call); let origin = RawOrigin::Signed(caller.clone()); }: _(origin, code, None, Determinism::Enforced) verify { // uploading the code reserves some balance in the callers account - assert!(T::Currency::reserved_balance(&caller) > 0u32.into()); + assert!(T::Currency::total_balance_on_hold(&caller) > 0u32.into()); assert!(>::code_exists(&hash)); } // Removing code does not depend on the size of the contract because all the information // needed to verify the removal claim (refcount, owner) is stored in a separate storage - // item (`OwnerInfoOf`). + // item (`CodeInfoOf`). #[pov_mode = Measured] remove_code { let caller = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, caller_funding::()); + T::Currency::set_balance(&caller, caller_funding::()); let WasmModule { code, hash, .. 
} = WasmModule::::dummy(); let origin = RawOrigin::Signed(caller.clone()); let uploaded = >::bare_upload_code(caller.clone(), code, None, Determinism::Enforced)?; assert_eq!(uploaded.code_hash, hash); - assert_eq!(uploaded.deposit, T::Currency::reserved_balance(&caller)); + assert_eq!(uploaded.deposit, T::Currency::total_balance_on_hold(&caller)); assert!(>::code_exists(&hash)); }: _(origin, hash) verify { // removing the code should have unreserved the deposit - assert_eq!(T::Currency::reserved_balance(&caller), 0u32.into()); + assert_eq!(T::Currency::total_balance_on_hold(&caller), 0u32.into()); assert!(>::code_removed(&hash)); } @@ -648,27 +760,6 @@ benchmarks! { let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - #[pov_mode = Measured] - seal_gas { - let r in 0 .. API_BENCHMARK_RUNS; - let code = WasmModule::::from(ModuleDefinition { - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "gas", - params: vec![ValueType::I64], - return_type: None, - }], - call_body: Some(body::repeated(r, &[ - Instruction::I64Const(42), - Instruction::Call(0), - ])), - .. Default::default() - }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - #[pov_mode = Measured] seal_input { let r in 0 .. API_BENCHMARK_RUNS; @@ -785,20 +876,51 @@ benchmarks! { let beneficiary = account::("beneficiary", 0, 0); let beneficiary_bytes = beneficiary.encode(); let beneficiary_len = beneficiary_bytes.len(); + let caller = whitelisted_caller(); + + T::Currency::set_balance(&caller, caller_funding::()); + + // Maximize the delegate_dependencies to account for the worst-case scenario. 
+ let code_hashes = (0..T::MaxDelegateDependencies::get()) + .map(|i| { + let new_code = WasmModule::::dummy_with_bytes(65 + i); + Contracts::::store_code_raw(new_code.code, caller.clone())?; + Ok(new_code.hash) + }) + .collect::, &'static str>>()?; + let code_hash_len = code_hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); + let code_hashes_bytes = code_hashes.iter().flat_map(|x| x.encode()).collect::>(); + let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_terminate", - params: vec![ValueType::I32, ValueType::I32], - return_type: None, - }], + imported_functions: vec![ + ImportedFunction { + module: "seal0", + name: "seal_terminate", + params: vec![ValueType::I32, ValueType::I32], + return_type: None, + }, + ImportedFunction { + module: "seal0", + name: "add_delegate_dependency", + params: vec![ValueType::I32], + return_type: None, + } + ], data_segments: vec![ DataSegment { offset: 0, value: beneficiary_bytes, }, + DataSegment { + offset: beneficiary_len as u32, + value: code_hashes_bytes, + }, ], + deploy_body: Some(body::repeated_dyn(r, vec![ + Counter(beneficiary_len as u32, code_hash_len as u32), // code_hash_ptr + Regular(Instruction::Call(1)), + ])), call_body: Some(body::repeated(r, &[ Instruction::I32Const(0), // beneficiary_ptr Instruction::I32Const(beneficiary_len as i32), // beneficiary_len @@ -809,15 +931,15 @@ benchmarks! 
{ let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); let deposit_account = instance.info()?.deposit_account().clone(); - assert_eq!(>::total_balance(&beneficiary), 0u32.into()); - assert_eq!(T::Currency::free_balance(&instance.account_id), Pallet::::min_balance() * 2u32.into()); - assert_ne!(T::Currency::free_balance(&deposit_account), 0u32.into()); + assert_eq!(T::Currency::total_balance(&beneficiary), 0u32.into()); + assert_eq!(T::Currency::balance(&instance.account_id), Pallet::::min_balance() * 2u32.into()); + assert_ne!(T::Currency::balance(&deposit_account), 0u32.into()); }: call(origin, instance.addr.clone(), 0u32.into(), Weight::MAX, None, vec![]) verify { if r > 0 { - assert_eq!(>::total_balance(&instance.account_id), 0u32.into()); - assert_eq!(>::total_balance(&deposit_account), 0u32.into()); - assert_eq!(>::total_balance(&beneficiary), Pallet::::min_balance() * 2u32.into()); + assert_eq!(T::Currency::total_balance(&instance.account_id), 0u32.into()); + assert_eq!(T::Currency::total_balance(&deposit_account), 0u32.into()); + assert_eq!(T::Currency::total_balance(&beneficiary), Pallet::::min_balance() * 2u32.into()); } } @@ -963,7 +1085,7 @@ benchmarks! { // or maximum allowed debug buffer size, whichever is less. let i in 0 .. (T::Schedule::get().limits.memory_pages * 64 * 1024).min(T::MaxDebugBufferLen::get()); // We benchmark versus messages containing printable ASCII codes. - // About 1Kb goes to the instrumented contract code instructions, + // About 1Kb goes to the contract code instructions, // whereas all the space left we use for the initialization of the debug messages data. let message = (0 .. T::MaxCodeLen::get() - 1024).zip((32..127).cycle()).map(|i| i.1).collect::>(); let code = WasmModule::::from(ModuleDefinition { @@ -1586,12 +1708,12 @@ benchmarks! 
{ instance.set_balance(value * (r + 1).into()); let origin = RawOrigin::Signed(instance.caller.clone()); for account in &accounts { - assert_eq!(>::total_balance(account), 0u32.into()); + assert_eq!(T::Currency::total_balance(account), 0u32.into()); } }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) verify { for account in &accounts { - assert_eq!(>::total_balance(account), value); + assert_eq!(T::Currency::total_balance(account), value); } } @@ -1670,7 +1792,7 @@ benchmarks! { }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, Some(BalanceOf::::from(u32::MAX).into()), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::MAX, Some(BalanceOf::::from(u32::MAX.into()).into()), vec![]) // This is a slow call: We redeuce the number of runs. #[pov_mode = Measured] @@ -1679,7 +1801,9 @@ benchmarks! { let hashes = (0..r) .map(|i| { let code = WasmModule::::dummy_with_bytes(i); - Contracts::::store_code_raw(code.code, whitelisted_caller())?; + let caller = whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + Contracts::::store_code_raw(code.code, caller)?; Ok(code.hash) }) .collect::, &'static str>>()?; @@ -1799,7 +1923,9 @@ benchmarks! { ])), .. Default::default() }); - Contracts::::store_code_raw(code.code, whitelisted_caller())?; + let caller = whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + Contracts::::store_code_raw(code.code, caller)?; Ok(code.hash) }) .collect::, &'static str>>()?; @@ -1903,7 +2029,9 @@ benchmarks! 
{ let hash = callee_code.hash; let hash_bytes = callee_code.hash.encode(); let hash_len = hash_bytes.len(); - Contracts::::store_code_raw(callee_code.code, whitelisted_caller())?; + let caller = whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + Contracts::::store_code_raw(callee_code.code, caller)?; let value: BalanceOf = t.into(); let value_bytes = value.encode(); @@ -2245,7 +2373,9 @@ benchmarks! { let code_hashes = (0..r) .map(|i| { let new_code = WasmModule::::dummy_with_bytes(i); - Contracts::::store_code_raw(new_code.code, whitelisted_caller())?; + let caller = whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + Contracts::::store_code_raw(new_code.code, caller)?; Ok(new_code.hash) }) .collect::, &'static str>>()?; @@ -2280,6 +2410,93 @@ benchmarks! { let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[pov_mode = Measured] + add_delegate_dependency { + let r in 0 .. 
T::MaxDelegateDependencies::get(); + let code_hashes = (0..r) + .map(|i| { + let new_code = WasmModule::::dummy_with_bytes(65 + i); + let caller = whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + Contracts::::store_code_raw(new_code.code, caller)?; + Ok(new_code.hash) + }) + .collect::, &'static str>>()?; + let code_hash_len = code_hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); + let code_hashes_bytes = code_hashes.iter().flat_map(|x| x.encode()).collect::>(); + + let code = WasmModule::::from(ModuleDefinition { + memory: Some(ImportedMemory::max::()), + imported_functions: vec![ImportedFunction { + module: "seal0", + name: "add_delegate_dependency", + params: vec![ValueType::I32], + return_type: None, + }], + data_segments: vec![ + DataSegment { + offset: 0, + value: code_hashes_bytes, + }, + ], + call_body: Some(body::repeated_dyn(r, vec![ + Counter(0, code_hash_len as u32), // code_hash_ptr + Regular(Instruction::Call(0)), + ])), + .. Default::default() + }); + let instance = Contract::::new(code, vec![])?; + let origin = RawOrigin::Signed(instance.caller.clone()); + }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + + remove_delegate_dependency { + let r in 0 .. 
T::MaxDelegateDependencies::get(); + let code_hashes = (0..r) + .map(|i| { + let new_code = WasmModule::::dummy_with_bytes(65 + i); + let caller = whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + Contracts::::store_code_raw(new_code.code, caller)?; + Ok(new_code.hash) + }) + .collect::, &'static str>>()?; + + let code_hash_len = code_hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); + let code_hashes_bytes = code_hashes.iter().flat_map(|x| x.encode()).collect::>(); + + let code = WasmModule::::from(ModuleDefinition { + memory: Some(ImportedMemory::max::()), + imported_functions: vec![ImportedFunction { + module: "seal0", + name: "remove_delegate_dependency", + params: vec![ValueType::I32], + return_type: None, + }, ImportedFunction { + module: "seal0", + name: "add_delegate_dependency", + params: vec![ValueType::I32], + return_type: None + }], + data_segments: vec![ + DataSegment { + offset: 0, + value: code_hashes_bytes, + }, + ], + deploy_body: Some(body::repeated_dyn(r, vec![ + Counter(0, code_hash_len as u32), // code_hash_ptr + Regular(Instruction::Call(1)), + ])), + call_body: Some(body::repeated_dyn(r, vec![ + Counter(0, code_hash_len as u32), // code_hash_ptr + Regular(Instruction::Call(0)), + ])), + .. Default::default() + }); + let instance = Contract::::new(code, vec![])?; + let origin = RawOrigin::Signed(instance.caller.clone()); + }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[pov_mode = Measured] seal_reentrance_count { let r in 0 .. API_BENCHMARK_RUNS; @@ -2357,15 +2574,10 @@ benchmarks! { }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) // We make the assumption that pushing a constant and dropping a value takes roughly - // the same amount of time. We follow that `t.load` and `drop` both have the weight - // of this benchmark / 2. We need to make this assumption because there is no way - // to measure them on their own using a valid wasm module. 
We need their individual - // values to derive the weight of individual instructions (by subtraction) from - // benchmarks that include those for parameter pushing and return type dropping. - // We call the weight of `t.load` and `drop`: `w_param`. + // the same amount of time. We call this weight `w_base`. // The weight that would result from the respective benchmark we call: `w_bench`. // - // w_i{32,64}const = w_drop = w_bench / 2 + // w_base = w_i{32,64}const = w_drop = w_bench / 2 #[pov_mode = Ignored] instr_i64const { let r in 0 .. INSTR_BENCHMARK_RUNS; @@ -2380,836 +2592,67 @@ benchmarks! { sbox.invoke(); } - // w_i{32,64}load = w_bench - 2 * w_param - #[pov_mode = Ignored] - instr_i64load { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - call_body: Some(body::repeated_dyn(r, vec![ - RandomUnaligned(0, code::max_pages::() * 64 * 1024 - 8), - Regular(Instruction::I64Load(3, 0)), - Regular(Instruction::Drop), - ])), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_i{32,64}store{...} = w_bench - 2 * w_param + // This is no benchmark. It merely exist to have an easy way to pretty print the currently + // configured `Schedule` during benchmark development. + // It can be outputted using the following command: + // cargo run --manifest-path=bin/node/cli/Cargo.toml \ + // --features runtime-benchmarks -- benchmark pallet --extra --dev --execution=native \ + // -p pallet_contracts -e print_schedule --no-median-slopes --no-min-squares + #[extra] #[pov_mode = Ignored] - instr_i64store { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - call_body: Some(body::repeated_dyn(r, vec![ - RandomUnaligned(0, code::max_pages::() * 64 * 1024 - 8), - RandomI64Repeated(1), - Regular(Instruction::I64Store(3, 0)), - ])), - .. 
Default::default() - })); - }: { - sbox.invoke(); - } + print_schedule { + #[cfg(feature = "std")] + { + let max_weight = ::BlockWeights::get().max_block; + let (weight_per_key, key_budget) = ContractInfo::::deletion_budget(max_weight); + println!("{:#?}", Schedule::::default()); + println!("###############################################"); + println!("Lazy deletion weight per key: {weight_per_key}"); + println!("Lazy deletion throughput per block: {key_budget}"); + } + #[cfg(not(feature = "std"))] + Err("Run this bench with a native runtime in order to see the schedule.")?; + }: {} - // w_select = w_bench - 4 * w_param - #[pov_mode = Ignored] - instr_select { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r, vec![ - RandomI64Repeated(1), - RandomI64Repeated(1), - RandomI32(0, 2), - Regular(Instruction::Select), - Regular(Instruction::Drop), - ])), - .. Default::default() - })); + // Execute one erc20 transfer using the ink! erc20 example contract. + #[extra] + #[pov_mode = Measured] + ink_erc20_transfer { + let code = load_benchmark!("ink_erc20"); + let data = { + let new: ([u8; 4], BalanceOf) = ([0x9b, 0xae, 0x9d, 0x5e], 1000u32.into()); + new.encode() + }; + let instance = Contract::::new( + WasmModule::from_code(code), data, + )?; + let data = { + let transfer: ([u8; 4], AccountIdOf, BalanceOf) = ( + [0x84, 0xa1, 0x5d, 0xa1], + account::("receiver", 0, 0), + 1u32.into(), + ); + transfer.encode() + }; }: { - sbox.invoke(); - } - - // w_if = w_bench - 3 * w_param - #[pov_mode = Ignored] - instr_if { - let r in 0 .. 
INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r, vec![ - RandomI32(0, 2), - Regular(Instruction::If(BlockType::Value(ValueType::I64))), - RandomI64Repeated(1), - Regular(Instruction::Else), - RandomI64Repeated(1), - Regular(Instruction::End), - Regular(Instruction::Drop), - ])), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_br = w_bench - 2 * w_param - // Block instructions are not counted. - #[pov_mode = Ignored] - instr_br { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r, vec![ - Regular(Instruction::Block(BlockType::NoResult)), - Regular(Instruction::Block(BlockType::NoResult)), - Regular(Instruction::Block(BlockType::NoResult)), - Regular(Instruction::Br(1)), - RandomI64Repeated(1), - Regular(Instruction::Drop), - Regular(Instruction::End), - RandomI64Repeated(1), - Regular(Instruction::Drop), - Regular(Instruction::End), - RandomI64Repeated(1), - Regular(Instruction::Drop), - Regular(Instruction::End), - ])), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_br_if = w_bench - 3 * w_param - // Block instructions are not counted. - #[pov_mode = Ignored] - instr_br_if { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r, vec![ - Regular(Instruction::Block(BlockType::NoResult)), - Regular(Instruction::Block(BlockType::NoResult)), - Regular(Instruction::Block(BlockType::NoResult)), - Regular(Instruction::I32Const(1)), - Regular(Instruction::BrIf(1)), - RandomI64Repeated(1), - Regular(Instruction::Drop), - Regular(Instruction::End), - RandomI64Repeated(1), - Regular(Instruction::Drop), - Regular(Instruction::End), - RandomI64Repeated(1), - Regular(Instruction::Drop), - Regular(Instruction::End), - ])), - .. 
Default::default() - })); - }: { - sbox.invoke(); - } - - // w_br_table = w_bench - 3 * w_param - // Block instructions are not counted. - #[pov_mode = Ignored] - instr_br_table { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let table = Box::new(BrTableData { - table: Box::new([1, 1, 1]), - default: 1, - }); - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r, vec![ - Regular(Instruction::Block(BlockType::NoResult)), - Regular(Instruction::Block(BlockType::NoResult)), - Regular(Instruction::Block(BlockType::NoResult)), - RandomI32(0, 4), - Regular(Instruction::BrTable(table)), - RandomI64Repeated(1), - Regular(Instruction::Drop), - Regular(Instruction::End), - RandomI64Repeated(1), - Regular(Instruction::Drop), - Regular(Instruction::End), - RandomI64Repeated(1), - Regular(Instruction::Drop), - Regular(Instruction::End), - ])), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_br_table_per_entry = w_bench - #[pov_mode = Ignored] - instr_br_table_per_entry { - let e in 1 .. T::Schedule::get().limits.br_table_size; - let entry: Vec = [0, 1].iter() - .cloned() - .cycle() - .take((e / 2) as usize).collect(); - let table = Box::new(BrTableData { - table: entry.into_boxed_slice(), - default: 0, - }); - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::plain(vec![ - Instruction::Block(BlockType::NoResult), - Instruction::Block(BlockType::NoResult), - Instruction::Block(BlockType::NoResult), - Instruction::I32Const((e / 2) as i32), - Instruction::BrTable(table), - Instruction::I64Const(42), - Instruction::Drop, - Instruction::End, - Instruction::I64Const(42), - Instruction::Drop, - Instruction::End, - Instruction::I64Const(42), - Instruction::Drop, - Instruction::End, - Instruction::End, - ])), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_call = w_bench - 2 * w_param - #[pov_mode = Ignored] - instr_call { - let r in 0 .. 
INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - // We need to make use of the stack here in order to trigger stack height - // instrumentation. - aux_body: Some(body::plain(vec![ - Instruction::I64Const(42), - Instruction::Drop, - Instruction::End, - ])), - call_body: Some(body::repeated(r, &[ - Instruction::Call(2), // call aux - ])), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_call_indrect = w_bench - 3 * w_param - #[pov_mode = Ignored] - instr_call_indirect { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let num_elements = T::Schedule::get().limits.table_size; - use self::code::TableSegment; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - // We need to make use of the stack here in order to trigger stack height - // instrumentation. - aux_body: Some(body::plain(vec![ - Instruction::I64Const(42), - Instruction::Drop, - Instruction::End, - ])), - call_body: Some(body::repeated_dyn(r, vec![ - RandomI32(0, num_elements as i32), - Regular(Instruction::CallIndirect(0, 0)), // we only have one sig: 0 - ])), - table: Some(TableSegment { - num_elements, - function_index: 2, // aux - }), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_per_local = w_bench - #[pov_mode = Ignored] - instr_call_per_local { - let l in 0 .. T::Schedule::get().limits.locals; - let mut aux_body = body::plain(vec![ - Instruction::End, - ]); - body::inject_locals(&mut aux_body, l); - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - aux_body: Some(aux_body), - call_body: Some(body::plain(vec![ - Instruction::Call(2), // call aux - Instruction::End, - ])), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_local_get = w_bench - 1 * w_param - #[pov_mode = Ignored] - instr_local_get { - let r in 0 .. 
INSTR_BENCHMARK_RUNS; - let max_locals = T::Schedule::get().limits.locals; - let mut call_body = body::repeated_dyn(r, vec![ - RandomGetLocal(0, max_locals), - Regular(Instruction::Drop), - ]); - body::inject_locals(&mut call_body, max_locals); - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(call_body), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_local_set = w_bench - 1 * w_param - #[pov_mode = Ignored] - instr_local_set { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let max_locals = T::Schedule::get().limits.locals; - let mut call_body = body::repeated_dyn(r, vec![ - RandomI64Repeated(1), - RandomSetLocal(0, max_locals), - ]); - body::inject_locals(&mut call_body, max_locals); - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(call_body), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_local_tee = w_bench - 2 * w_param - #[pov_mode = Ignored] - instr_local_tee { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let max_locals = T::Schedule::get().limits.locals; - let mut call_body = body::repeated_dyn(r, vec![ - RandomI64Repeated(1), - RandomTeeLocal(0, max_locals), - Regular(Instruction::Drop), - ]); - body::inject_locals(&mut call_body, max_locals); - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(call_body), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_global_get = w_bench - 1 * w_param - #[pov_mode = Ignored] - instr_global_get { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let max_globals = T::Schedule::get().limits.globals; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r, vec![ - RandomGetGlobal(0, max_globals), - Regular(Instruction::Drop), - ])), - num_globals: max_globals, - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_global_set = w_bench - 1 * w_param - #[pov_mode = Ignored] - instr_global_set { - let r in 0 .. 
INSTR_BENCHMARK_RUNS; - let max_globals = T::Schedule::get().limits.globals; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r, vec![ - RandomI64Repeated(1), - RandomSetGlobal(0, max_globals), - ])), - num_globals: max_globals, - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_memory_get = w_bench - 1 * w_param - #[pov_mode = Ignored] - instr_memory_current { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - call_body: Some(body::repeated(r, &[ - Instruction::CurrentMemory(0), - Instruction::Drop - ])), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // w_memory_grow = w_bench - 2 * w_param - // We can only allow allocate as much memory as it is allowed in a contract. - // Therefore the repeat count is limited by the maximum memory any contract can have. - // Using a contract with more memory will skew the benchmark because the runtime of grow - // depends on how much memory is already allocated. - #[pov_mode = Ignored] - instr_memory_grow { - let r in 0 .. ImportedMemory::max::().max_pages; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory { - min_pages: 0, - max_pages: ImportedMemory::max::().max_pages, - }), - call_body: Some(body::repeated(r, &[ - Instruction::I32Const(1), - Instruction::GrowMemory(0), - Instruction::Drop, - ])), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - // Unary numeric instructions. - // All use w = w_bench - 2 * w_param. - - #[pov_mode = Ignored] - instr_i64clz { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::unary_instr( - Instruction::I64Clz, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64ctz { - let r in 0 .. 
INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::unary_instr( - Instruction::I64Ctz, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64popcnt { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::unary_instr( - Instruction::I64Popcnt, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64eqz { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::unary_instr( - Instruction::I64Eqz, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64extendsi32 { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r, vec![ - RandomI32Repeated(1), - Regular(Instruction::I64ExtendSI32), - Regular(Instruction::Drop), - ])), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64extendui32 { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { - call_body: Some(body::repeated_dyn(r, vec![ - RandomI32Repeated(1), - Regular(Instruction::I64ExtendUI32), - Regular(Instruction::Drop), - ])), - .. Default::default() - })); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i32wrapi64 { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::unary_instr( - Instruction::I32WrapI64, - r, - )); - }: { - sbox.invoke(); - } - - // Binary numeric instructions. - // All use w = w_bench - 3 * w_param. - - #[pov_mode = Ignored] - instr_i64eq { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64Eq, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64ne { - let r in 0 .. 
INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64Ne, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64lts { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64LtS, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64ltu { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64LtU, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64gts { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64GtS, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64gtu { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64GtU, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64les { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64LeS, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64leu { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64LeU, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64ges { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64GeS, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64geu { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64GeU, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64add { - let r in 0 .. 
INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64Add, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64sub { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64Sub, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64mul { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64Mul, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64divs { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64DivS, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64divu { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64DivU, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64rems { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64RemS, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64remu { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64RemU, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64and { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64And, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64or { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64Or, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64xor { - let r in 0 .. 
INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64Xor, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64shl { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64Shl, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64shrs { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64ShrS, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64shru { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64ShrU, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64rotl { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64Rotl, - r, - )); - }: { - sbox.invoke(); - } - - #[pov_mode = Ignored] - instr_i64rotr { - let r in 0 .. INSTR_BENCHMARK_RUNS; - let mut sbox = Sandbox::from(&WasmModule::::binary_instr( - Instruction::I64Rotr, - r, - )); - }: { - sbox.invoke(); - } - - // This is no benchmark. It merely exist to have an easy way to pretty print the currently - // configured `Schedule` during benchmark development. 
- // It can be outputted using the following command: - // cargo run --manifest-path=bin/node/cli/Cargo.toml \ - // --features runtime-benchmarks -- benchmark pallet --extra --dev --execution=native \ - // -p pallet_contracts -e print_schedule --no-median-slopes --no-min-squares - #[extra] - #[pov_mode = Ignored] - print_schedule { - #[cfg(feature = "std")] - { - let max_weight = ::BlockWeights::get().max_block; - let (weight_per_key, key_budget) = ContractInfo::::deletion_budget(max_weight); - println!("{:#?}", Schedule::::default()); - println!("###############################################"); - println!("Lazy deletion weight per key: {weight_per_key}"); - println!("Lazy deletion throughput per block: {key_budget}"); - } - #[cfg(not(feature = "std"))] - Err("Run this bench with a native runtime in order to see the schedule.")?; - }: {} - - // Execute one erc20 transfer using the ink! erc20 example contract. - // - // `g` is used to enable gas instrumentation to compare the performance impact of - // that instrumentation at runtime. - #[extra] - #[pov_mode = Measured] - ink_erc20_transfer { - let g in 0 .. 
1; - let gas_metering = g != 0; - let code = load_benchmark!("ink_erc20"); - let data = { - let new: ([u8; 4], BalanceOf) = ([0x9b, 0xae, 0x9d, 0x5e], 1000u32.into()); - new.encode() - }; - let instance = Contract::::new( - WasmModule::instrumented(code, gas_metering), data, - )?; - let data = { - let transfer: ([u8; 4], AccountIdOf, BalanceOf) = ( - [0x84, 0xa1, 0x5d, 0xa1], - account::("receiver", 0, 0), - 1u32.into(), - ); - transfer.encode() - }; - }: { - >::bare_call( - instance.caller, - instance.account_id, - 0u32.into(), - Weight::MAX, - None, - data, - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result?; + >::bare_call( + instance.caller, + instance.account_id, + 0u32.into(), + Weight::MAX, + None, + data, + DebugInfo::Skip, + CollectEvents::Skip, + Determinism::Enforced, + ) + .result?; } // Execute one erc20 transfer using the open zeppelin erc20 contract compiled with solang. - // - // `g` is used to enable gas instrumentation to compare the performance impact of - // that instrumentation at runtime. #[extra] #[pov_mode = Measured] solang_erc20_transfer { - let g in 0 .. 1; - let gas_metering = g != 0; let code = include_bytes!("../../benchmarks/solang_erc20.wasm"); let caller = account::("instantiator", 0, 0); let mut balance = [0u8; 32]; @@ -3225,7 +2668,7 @@ benchmarks! { new.encode() }; let instance = Contract::::with_caller( - caller, WasmModule::instrumented(code, gas_metering), data, + caller, WasmModule::from_code(code), data, )?; balance[0] = 1; let data = { diff --git a/frame/contracts/src/benchmarking/sandbox.rs b/frame/contracts/src/benchmarking/sandbox.rs index 7e28840981008..34974b02ea0c4 100644 --- a/frame/contracts/src/benchmarking/sandbox.rs +++ b/frame/contracts/src/benchmarking/sandbox.rs @@ -15,13 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -/// ! For instruction benchmarking we do no instantiate a full contract but merely the -/// ! 
sandbox to execute the wasm code. This is because we do not need the full +/// ! For instruction benchmarking we do not instantiate a full contract but merely the +/// ! sandbox to execute the Wasm code. This is because we do not need the full /// ! environment that provides the seal interface as imported functions. use super::{code::WasmModule, Config}; use crate::wasm::{ - AllowDeprecatedInterface, AllowUnstableInterface, Environment, PrefabWasmModule, + AllowDeprecatedInterface, AllowUnstableInterface, Determinism, Environment, WasmBlob, }; +use sp_core::Get; use wasmi::{errors::LinkerError, Func, Linker, StackLimits, Store}; /// Minimal execution environment without any imported functions. @@ -38,23 +39,25 @@ impl Sandbox { } impl From<&WasmModule> for Sandbox { - /// Creates an instance from the supplied module and supplies as much memory - /// to the instance as the module declares as imported. + /// Creates an instance from the supplied module. + /// Sets the execution engine fuel level to `u64::MAX`. fn from(module: &WasmModule) -> Self { - let memory = module - .memory - .as_ref() - .map(|mem| (mem.min_pages, mem.max_pages)) - .unwrap_or((0, 0)); - let (store, _memory, instance) = PrefabWasmModule::::instantiate::( + let (mut store, _memory, instance) = WasmBlob::::instantiate::( &module.code, (), - memory, + &::Schedule::get(), + Determinism::Relaxed, StackLimits::default(), // We are testing with an empty environment anyways AllowDeprecatedInterface::No, ) .expect("Failed to create benchmarking Sandbox instance"); + + // Set fuel for wasmi execution. 
+ store + .add_fuel(u64::MAX) + .expect("We've set up engine to fuel consuming mode; qed"); + let entry_point = instance.get_export(&store, "call").unwrap().into_func().unwrap(); Self { entry_point, store } } diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index 6d1f3df90f23e..664504d207f3a 100644 --- a/frame/contracts/src/chain_extension.rs +++ b/frame/contracts/src/chain_extension.rs @@ -79,7 +79,7 @@ use frame_support::weights::Weight; use sp_runtime::DispatchError; use sp_std::{marker::PhantomData, vec::Vec}; -pub use crate::{exec::Ext, gas::ChargedAmount, Config}; +pub use crate::{exec::Ext, gas::ChargedAmount, storage::meter::Diff, Config}; pub use frame_system::Config as SysConfig; pub use pallet_contracts_primitives::ReturnFlags; diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index a81a633f740ab..65a9abe0c39b6 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -15,31 +15,37 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#[cfg(feature = "unsafe-debug")] +use crate::unsafe_debug::ExecutionObserver; use crate::{ gas::GasMeter, - storage::{self, DepositAccount, WriteOutcome}, - BalanceOf, CodeHash, Config, ContractInfo, ContractInfoOf, DebugBufferVec, Determinism, Error, - Event, Nonce, Origin, Pallet as Contracts, Schedule, System, LOG_TARGET, + storage::{self, meter::Diff, DepositAccount, WriteOutcome}, + BalanceOf, CodeHash, CodeInfo, CodeInfoOf, Config, ContractInfo, ContractInfoOf, + DebugBufferVec, Determinism, Error, Event, Nonce, Origin, Pallet as Contracts, Schedule, + System, WasmBlob, LOG_TARGET, }; use frame_support::{ crypto::ecdsa::ECDSAExt, dispatch::{ fmt::Debug, DispatchError, DispatchResult, DispatchResultWithPostInfo, Dispatchable, }, + ensure, storage::{with_transaction, TransactionOutcome}, traits::{ - tokens::{Fortitude::Polite, Preservation::Expendable}, - Contains, Currency, ExistenceRequirement, OriginTrait, Randomness, Time, + fungible::{Inspect, Mutate}, + tokens::{Fortitude::Polite, Preservation}, + Contains, OriginTrait, Randomness, Time, }, weights::Weight, Blake2_128Concat, BoundedVec, StorageHasher, }; -use frame_system::RawOrigin; -use pallet_contracts_primitives::ExecReturnValue; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; +use pallet_contracts_primitives::{ExecReturnValue, StorageDeposit}; use smallvec::{Array, SmallVec}; use sp_core::{ ecdsa::Public as ECDSAPublic, sr25519::{Public as SR25519Public, Signature as SR25519Signature}, + Get, }; use sp_io::{crypto::secp256k1_ecdsa_recover_compressed, hashing::blake2_256}; use sp_runtime::traits::{Convert, Hash, Zero}; @@ -48,7 +54,6 @@ use sp_std::{marker::PhantomData, mem, prelude::*, vec::Vec}; pub type AccountIdOf = ::AccountId; pub type MomentOf = <::Time as Time>::Moment; pub type SeedOf = ::Hash; -pub type BlockNumberOf = ::BlockNumber; pub type ExecResult = Result; /// A type that represents a topic of an event. At the moment a hash is used. 
@@ -135,7 +140,7 @@ pub trait Ext: sealing::Sealed { /// Call (possibly transferring some amount of funds) into the specified account. /// - /// Returns the original code size of the called contract. + /// Returns the code size of the called contract. fn call( &mut self, gas_limit: Weight, @@ -148,7 +153,7 @@ pub trait Ext: sealing::Sealed { /// Execute code in the current frame. /// - /// Returns the original code size of the called contract. + /// Returns the code size of the called contract. fn delegate_call( &mut self, code: CodeHash, @@ -159,7 +164,7 @@ pub trait Ext: sealing::Sealed { /// /// Returns the original code size of the called contract. /// The newly created account will be associated with `code`. `value` specifies the amount of - /// value transferred from this to the newly created account. + /// value transferred from the caller to the newly created account. fn instantiate( &mut self, gas_limit: Weight, @@ -244,7 +249,7 @@ pub trait Ext: sealing::Sealed { fn minimum_balance(&self) -> BalanceOf; /// Returns a random number for the current block with the given subject. - fn random(&self, subject: &[u8]) -> (SeedOf, BlockNumberOf); + fn random(&self, subject: &[u8]) -> (SeedOf, BlockNumberFor); /// Deposit an event with the given topics. /// @@ -252,7 +257,7 @@ pub trait Ext: sealing::Sealed { fn deposit_event(&mut self, topics: Vec>, data: Vec); /// Returns the current block number. - fn block_number(&self) -> BlockNumberOf; + fn block_number(&self) -> BlockNumberFor; /// Returns the maximum allowed size of a storage item. fn max_value_size(&self) -> u32; @@ -263,8 +268,14 @@ pub trait Ext: sealing::Sealed { /// Get a reference to the schedule used by the current call. fn schedule(&self) -> &Schedule; + /// Get an immutable reference to the nested gas meter. + fn gas_meter(&self) -> &GasMeter; + /// Get a mutable reference to the nested gas meter. 
- fn gas_meter(&mut self) -> &mut GasMeter; + fn gas_meter_mut(&mut self) -> &mut GasMeter; + + /// Charges `diff` from the meter. + fn charge_storage(&mut self, diff: &Diff); /// Append a string to the debug buffer. /// @@ -306,10 +317,49 @@ pub trait Ext: sealing::Sealed { /// Returns a nonce that is incremented for every instantiated contract. fn nonce(&mut self) -> u64; + + /// Adds a delegate dependency to [`ContractInfo`]'s `delegate_dependencies` field. + /// + /// This ensures that the delegated contract is not removed while it is still in use. It + /// increases the reference count of the code hash and charges a fraction (see + /// [`Config::CodeHashLockupDepositPercent`]) of the code deposit. + /// + /// # Errors + /// + /// - [`Error::::MaxDelegateDependenciesReached`] + /// - [`Error::::CannotAddSelfAsDelegateDependency`] + /// - [`Error::::DelegateDependencyAlreadyExists`] + fn add_delegate_dependency( + &mut self, + code_hash: CodeHash, + ) -> Result<(), DispatchError>; + + /// Removes a delegate dependency from [`ContractInfo`]'s `delegate_dependencies` field. + /// + /// This is the counterpart of [`Self::add_delegate_dependency`]. It decreases the reference + /// count and refunds the deposit that was charged by [`Self::add_delegate_dependency`]. + /// + /// # Errors + /// + /// - [`Error::::DelegateDependencyNotFound`] + fn remove_delegate_dependency( + &mut self, + code_hash: &CodeHash, + ) -> Result<(), DispatchError>; } /// Describes the different functions that can be exported by an [`Executable`]. -#[derive(Clone, Copy, PartialEq)] +#[derive( + Copy, + Clone, + PartialEq, + Eq, + sp_core::RuntimeDebug, + codec::Decode, + codec::Encode, + codec::MaxEncodedLen, + scale_info::TypeInfo, +)] pub enum ExportedFunction { /// The constructor function which is executed on deployment of a contract. Constructor, @@ -325,24 +375,27 @@ pub trait Executable: Sized { /// Load the executable from storage. 
/// /// # Note - /// Charges size base load and instrumentation weight from the gas meter. + /// Charges size base load weight from the gas meter. fn from_storage( code_hash: CodeHash, - schedule: &Schedule, gas_meter: &mut GasMeter, ) -> Result; - /// Increment the refcount of a code in-storage by one. - /// - /// This is needed when the code is not set via instantiate but `seal_set_code_hash`. + /// Increment the reference count of a of a stored code by one. /// /// # Errors /// - /// [`Error::CodeNotFound`] is returned if the specified `code_hash` does not exist. - fn add_user(code_hash: CodeHash) -> Result<(), DispatchError>; + /// [`Error::CodeNotFound`] is returned if no stored code found having the specified + /// `code_hash`. + fn increment_refcount(code_hash: CodeHash) -> Result<(), DispatchError>; - /// Decrement the refcount by one if the code exists. - fn remove_user(code_hash: CodeHash); + /// Decrement the reference count of a stored code by one. + /// + /// # Note + /// + /// A contract whose reference count dropped to zero isn't automatically removed. A + /// `remove_code` transaction must be submitted by the original uploader to do so. + fn decrement_refcount(code_hash: CodeHash); /// Execute the specified exported function and return the result. /// @@ -360,10 +413,13 @@ pub trait Executable: Sized { input_data: Vec, ) -> ExecResult; + /// The code info of the executable. + fn code_info(&self) -> &CodeInfo; + /// The code hash of the executable. fn code_hash(&self) -> &CodeHash; - /// Size of the instrumented code in bytes. + /// Size of the contract code in bytes. fn code_len(&self) -> u32; /// The code does not contain any instructions which could lead to indeterminism. @@ -394,7 +450,7 @@ pub struct Stack<'a, T: Config, E> { /// The timestamp at the point of call stack instantiation. timestamp: MomentOf, /// The block number at the time of call stack instantiation. 
- block_number: T::BlockNumber, + block_number: BlockNumberFor, /// The nonce is cached here when accessed. It is written back when the call stack /// finishes executing. Please refer to [`Nonce`] to a description of /// the nonce itself. @@ -703,9 +759,9 @@ where Weight::zero(), storage_meter, BalanceOf::::zero(), - schedule, determinism, )?; + let stack = Self { origin, schedule, @@ -735,7 +791,6 @@ where gas_limit: Weight, storage_meter: &mut storage::meter::GenericMeter, deposit_limit: BalanceOf, - schedule: &Schedule, determinism: Determinism, ) -> Result<(Frame, E, Option), ExecError> { let (account_id, contract_info, executable, delegate_caller, entry_point, nonce) = @@ -751,7 +806,7 @@ where if let Some(DelegatedCall { executable, caller }) = delegated_call { (executable, Some(caller)) } else { - (E::from_storage(contract.code_hash, schedule, gas_meter)?, None) + (E::from_storage(contract.code_hash, gas_meter)?, None) }; (dest, contract, executable, delegate_caller, ExportedFunction::Call, None) @@ -759,7 +814,7 @@ where FrameArgs::Instantiate { sender, nonce, executable, salt, input_data } => { let account_id = Contracts::::contract_address( &sender, - executable.code_hash(), + &executable.code_hash(), input_data, salt, ); @@ -831,7 +886,6 @@ where gas_limit, nested_storage, deposit_limit, - self.schedule, self.determinism, )?; self.frames.push(frame); @@ -858,17 +912,28 @@ where origin, &frame.account_id, frame.contract_info.get(&frame.account_id), + executable.code_info(), )?; } // Every non delegate call or instantiate also optionally transfers the balance. self.initial_transfer()?; - // Call into the wasm blob. + #[cfg(feature = "unsafe-debug")] + let (code_hash, input_clone) = { + let code_hash = *executable.code_hash(); + T::Debug::before_call(&code_hash, entry_point, &input_data); + (code_hash, input_data.clone()) + }; + + // Call into the Wasm blob. 
let output = executable .execute(self, &entry_point, input_data) .map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; + #[cfg(feature = "unsafe-debug")] + T::Debug::after_call(&code_hash, entry_point, input_clone, &output); + // Avoid useless work that would be reverted anyways. if output.did_revert() { return Ok(output) @@ -1061,13 +1126,15 @@ where /// Transfer some funds from `from` to `to`. fn transfer( - existence_requirement: ExistenceRequirement, + preservation: Preservation, from: &T::AccountId, to: &T::AccountId, value: BalanceOf, ) -> DispatchResult { - T::Currency::transfer(from, to, value, existence_requirement) - .map_err(|_| Error::::TransferFailed)?; + if !value.is_zero() && from != to { + T::Currency::transfer(from, to, value, preservation) + .map_err(|_| Error::::TransferFailed)?; + } Ok(()) } @@ -1091,7 +1158,7 @@ where Origin::Root if value.is_zero() => return Ok(()), Origin::Root => return DispatchError::RootNotAllowed.into(), }; - Self::transfer(ExistenceRequirement::KeepAlive, &caller, &frame.account_id, value) + Self::transfer(Preservation::Preserve, &caller, &frame.account_id, value) } /// Reference to the current (top) frame. 
@@ -1193,7 +1260,7 @@ where code_hash: CodeHash, input_data: Vec, ) -> Result { - let executable = E::from_storage(code_hash, self.schedule, self.gas_meter())?; + let executable = E::from_storage(code_hash, self.gas_meter_mut())?; let top_frame = self.top_frame_mut(); let contract_info = top_frame.contract_info().clone(); let account_id = top_frame.account_id.clone(); @@ -1220,7 +1287,7 @@ where input_data: Vec, salt: &[u8], ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { - let executable = E::from_storage(code_hash, self.schedule, self.gas_meter())?; + let executable = E::from_storage(code_hash, self.gas_meter_mut())?; let nonce = self.next_nonce(); let executable = self.push_frame( FrameArgs::Instantiate { @@ -1239,7 +1306,6 @@ where } fn terminate(&mut self, beneficiary: &AccountIdOf) -> Result<(), DispatchError> { - use frame_support::traits::fungible::Inspect; if self.is_recursive() { return Err(Error::::TerminatedWhileReentrant.into()) } @@ -1247,15 +1313,23 @@ where let info = frame.terminate(); frame.nested_storage.terminate(&info); System::::dec_consumers(&frame.account_id); - T::Currency::transfer( + Self::transfer( + Preservation::Expendable, &frame.account_id, beneficiary, - T::Currency::reducible_balance(&frame.account_id, Expendable, Polite), - ExistenceRequirement::AllowDeath, + T::Currency::reducible_balance(&frame.account_id, Preservation::Expendable, Polite), )?; info.queue_trie_for_deletion(); ContractInfoOf::::remove(&frame.account_id); - E::remove_user(info.code_hash); + E::decrement_refcount(info.code_hash); + + for (code_hash, deposit) in info.delegate_dependencies() { + E::decrement_refcount(*code_hash); + frame + .nested_storage + .charge_deposit(info.deposit_account().clone(), StorageDeposit::Refund(*deposit)); + } + Contracts::::deposit_event( vec![T::Hashing::hash_of(&frame.account_id), T::Hashing::hash_of(&beneficiary)], Event::Terminated { @@ -1267,7 +1341,7 @@ where } fn transfer(&mut self, to: &T::AccountId, value: 
BalanceOf) -> DispatchResult { - Self::transfer(ExistenceRequirement::KeepAlive, &self.top_frame().account_id, to, value) + Self::transfer(Preservation::Preserve, &self.top_frame().account_id, to, value) } fn get_storage(&mut self, key: &Key) -> Option> { @@ -1330,14 +1404,14 @@ where } fn balance(&self) -> BalanceOf { - T::Currency::free_balance(&self.top_frame().account_id) + T::Currency::balance(&self.top_frame().account_id) } fn value_transferred(&self) -> BalanceOf { self.top_frame().value_transferred } - fn random(&self, subject: &[u8]) -> (SeedOf, BlockNumberOf) { + fn random(&self, subject: &[u8]) -> (SeedOf, BlockNumberFor) { T::Randomness::random(subject) } @@ -1356,7 +1430,7 @@ where ); } - fn block_number(&self) -> T::BlockNumber { + fn block_number(&self) -> BlockNumberFor { self.block_number } @@ -1372,10 +1446,18 @@ where self.schedule } - fn gas_meter(&mut self) -> &mut GasMeter { + fn gas_meter(&self) -> &GasMeter { + &self.top_frame().nested_gas + } + + fn gas_meter_mut(&mut self) -> &mut GasMeter { &mut self.top_frame_mut().nested_gas } + fn charge_storage(&mut self, diff: &Diff) { + self.top_frame_mut().nested_storage.charge(diff) + } + fn append_debug_buffer(&mut self, msg: &str) -> bool { if let Some(buffer) = &mut self.debug_message { buffer @@ -1423,13 +1505,27 @@ where fn set_code_hash(&mut self, hash: CodeHash) -> Result<(), DispatchError> { let frame = top_frame_mut!(self); - if !E::from_storage(hash, self.schedule, &mut frame.nested_gas)?.is_deterministic() { + if !E::from_storage(hash, &mut frame.nested_gas)?.is_deterministic() { return Err(>::Indeterministic.into()) } - E::add_user(hash)?; - let prev_hash = frame.contract_info().code_hash; - E::remove_user(prev_hash); - frame.contract_info().code_hash = hash; + + let info = frame.contract_info(); + + let prev_hash = info.code_hash; + info.code_hash = hash; + + let code_info = CodeInfoOf::::get(hash).ok_or(Error::::CodeNotFound)?; + + let old_base_deposit = info.storage_base_deposit(); 
+ let new_base_deposit = info.update_base_deposit(&code_info); + let deposit = StorageDeposit::Charge(new_base_deposit) + .saturating_sub(&StorageDeposit::Charge(old_base_deposit)); + + let deposit_account = info.deposit_account().clone(); + frame.nested_storage.charge_deposit(deposit_account, deposit); + + E::increment_refcount(hash)?; + E::decrement_refcount(prev_hash); Contracts::::deposit_event( vec![T::Hashing::hash_of(&frame.account_id), hash, prev_hash], Event::ContractCodeUpdated { @@ -1461,6 +1557,41 @@ where current } } + + fn add_delegate_dependency( + &mut self, + code_hash: CodeHash, + ) -> Result<(), DispatchError> { + let frame = self.top_frame_mut(); + let info = frame.contract_info.get(&frame.account_id); + ensure!(code_hash != info.code_hash, Error::::CannotAddSelfAsDelegateDependency); + + let code_info = CodeInfoOf::::get(code_hash).ok_or(Error::::CodeNotFound)?; + let deposit = T::CodeHashLockupDepositPercent::get().mul_ceil(code_info.deposit()); + + info.add_delegate_dependency(code_hash, deposit)?; + >::increment_refcount(code_hash)?; + frame + .nested_storage + .charge_deposit(info.deposit_account().clone(), StorageDeposit::Charge(deposit)); + Ok(()) + } + + fn remove_delegate_dependency( + &mut self, + code_hash: &CodeHash, + ) -> Result<(), DispatchError> { + let frame = self.top_frame_mut(); + let info = frame.contract_info.get(&frame.account_id); + + let deposit = info.remove_delegate_dependency(code_hash)?; + >::decrement_refcount(*code_hash); + + frame + .nested_storage + .charge_deposit(info.deposit_account().clone(), StorageDeposit::Refund(deposit)); + Ok(()) + } } mod sealing { @@ -1536,6 +1667,7 @@ mod tests { func: Rc ExecResult + 'static>, func_type: ExportedFunction, code_hash: CodeHash, + code_info: CodeInfo, refcount: u64, } @@ -1546,6 +1678,10 @@ mod tests { } impl MockLoader { + fn code_hashes() -> Vec> { + Loader::get().map.keys().copied().collect() + } + fn insert( func_type: ExportedFunction, f: impl Fn(MockCtx, 
&MockExecutable) -> ExecResult + 'static, @@ -1556,7 +1692,13 @@ mod tests { loader.counter += 1; loader.map.insert( hash, - MockExecutable { func: Rc::new(f), func_type, code_hash: hash, refcount: 1 }, + MockExecutable { + func: Rc::new(f), + func_type, + code_hash: hash, + code_info: CodeInfo::::new(ALICE), + refcount: 1, + }, ); hash }) @@ -1591,7 +1733,6 @@ mod tests { impl Executable for MockExecutable { fn from_storage( code_hash: CodeHash, - _schedule: &Schedule, _gas_meter: &mut GasMeter, ) -> Result { Loader::mutate(|loader| { @@ -1599,11 +1740,11 @@ mod tests { }) } - fn add_user(code_hash: CodeHash) -> Result<(), DispatchError> { + fn increment_refcount(code_hash: CodeHash) -> Result<(), DispatchError> { MockLoader::increment_refcount(code_hash) } - fn remove_user(code_hash: CodeHash) { + fn decrement_refcount(code_hash: CodeHash) { MockLoader::decrement_refcount(code_hash); } @@ -1614,7 +1755,7 @@ mod tests { input_data: Vec, ) -> ExecResult { if let &Constructor = function { - Self::add_user(self.code_hash).unwrap(); + Self::increment_refcount(self.code_hash).unwrap(); } if function == &self.func_type { (self.func)(MockCtx { ext, input_data }, &self) @@ -1627,6 +1768,10 @@ mod tests { &self.code_hash } + fn code_info(&self) -> &CodeInfo { + &self.code_info + } + fn code_len(&self) -> u32 { 0 } @@ -1694,7 +1839,7 @@ mod tests { set_balance(&origin, 100); set_balance(&dest, 0); - MockStack::transfer(ExistenceRequirement::KeepAlive, &origin, &dest, 55).unwrap(); + MockStack::transfer(Preservation::Preserve, &origin, &dest, 55).unwrap(); assert_eq!(get_balance(&origin), 45); assert_eq!(get_balance(&dest), 55); @@ -1832,7 +1977,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { set_balance(&origin, 0); - let result = MockStack::transfer(ExistenceRequirement::KeepAlive, &origin, &dest, 100); + let result = MockStack::transfer(Preservation::Preserve, &origin, &dest, 100); assert_eq!(result, Err(Error::::TransferFailed.into())); 
assert_eq!(get_balance(&origin), 0); @@ -1948,30 +2093,33 @@ mod tests { }); // This one tests passing the input data into a contract via instantiate. - ExtBuilder::default().build().execute_with(|| { - let schedule = ::Schedule::get(); - let min_balance = ::Currency::minimum_balance(); - let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let executable = - MockExecutable::from_storage(input_data_ch, &schedule, &mut gas_meter).unwrap(); - set_balance(&ALICE, min_balance * 10_000); - let contract_origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&contract_origin, None, min_balance).unwrap(); - - let result = MockStack::run_instantiate( - ALICE, - executable, - &mut gas_meter, - &mut storage_meter, - &schedule, - min_balance, - vec![1, 2, 3, 4], - &[], - None, - ); - assert_matches!(result, Ok(_)); - }); + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .build() + .execute_with(|| { + let schedule = ::Schedule::get(); + let min_balance = ::Currency::minimum_balance(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = + MockExecutable::from_storage(input_data_ch, &mut gas_meter).unwrap(); + set_balance(&ALICE, min_balance * 10_000); + let contract_origin = Origin::from_account_id(ALICE); + let mut storage_meter = + storage::meter::Meter::new(&contract_origin, None, min_balance).unwrap(); + + let result = MockStack::run_instantiate( + ALICE, + executable, + &mut gas_meter, + &mut storage_meter, + &schedule, + min_balance, + vec![1, 2, 3, 4], + &[], + None, + ); + assert_matches!(result, Ok(_)); + }); } #[test] @@ -2366,8 +2514,7 @@ mod tests { ExtBuilder::default().existential_deposit(15).build().execute_with(|| { let schedule = ::Schedule::get(); let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let executable = - MockExecutable::from_storage(dummy_ch, &schedule, &mut gas_meter).unwrap(); + let executable = MockExecutable::from_storage(dummy_ch, &mut gas_meter).unwrap(); let 
contract_origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&contract_origin, Some(0), 0).unwrap(); @@ -2395,44 +2542,53 @@ mod tests { Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![80, 65, 83, 83] }) }); - ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let schedule = ::Schedule::get(); - let min_balance = ::Currency::minimum_balance(); - let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let executable = - MockExecutable::from_storage(dummy_ch, &schedule, &mut gas_meter).unwrap(); - set_balance(&ALICE, min_balance * 1000); - let contract_origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&contract_origin, Some(min_balance * 100), min_balance) - .unwrap(); - - let instantiated_contract_address = assert_matches!( - MockStack::run_instantiate( - ALICE, - executable, - &mut gas_meter, - &mut storage_meter, - &schedule, + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .existential_deposit(15) + .build() + .execute_with(|| { + let schedule = ::Schedule::get(); + let min_balance = ::Currency::minimum_balance(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = MockExecutable::from_storage(dummy_ch, &mut gas_meter).unwrap(); + set_balance(&ALICE, min_balance * 1000); + let contract_origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new( + &contract_origin, + Some(min_balance * 100), min_balance, - vec![], - &[], - None, - ), - Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address - ); + ) + .unwrap(); - // Check that the newly created account has the expected code hash and - // there are instantiation event. 
- assert_eq!( - ContractInfo::::load_code_hash(&instantiated_contract_address).unwrap(), - dummy_ch - ); - assert_eq!( - &events(), - &[Event::Instantiated { deployer: ALICE, contract: instantiated_contract_address }] - ); - }); + let instantiated_contract_address = assert_matches!( + MockStack::run_instantiate( + ALICE, + executable, + &mut gas_meter, + &mut storage_meter, + &schedule, + min_balance, + vec![], + &[], + None, + ), + Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address + ); + + // Check that the newly created account has the expected code hash and + // there are instantiation event. + assert_eq!( + ContractInfo::::load_code_hash(&instantiated_contract_address).unwrap(), + dummy_ch + ); + assert_eq!( + &events(), + &[Event::Instantiated { + deployer: ALICE, + contract: instantiated_contract_address + }] + ); + }); } #[test] @@ -2441,37 +2597,45 @@ mod tests { Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70, 65, 73, 76] }) }); - ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let schedule = ::Schedule::get(); - let min_balance = ::Currency::minimum_balance(); - let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let executable = - MockExecutable::from_storage(dummy_ch, &schedule, &mut gas_meter).unwrap(); - set_balance(&ALICE, min_balance * 1000); - let contract_origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&contract_origin, Some(min_balance * 100), min_balance) - .unwrap(); - - let instantiated_contract_address = assert_matches!( - MockStack::run_instantiate( - ALICE, - executable, - &mut gas_meter, - &mut storage_meter, - &schedule, + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .existential_deposit(15) + .build() + .execute_with(|| { + let schedule = ::Schedule::get(); + let min_balance = ::Currency::minimum_balance(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = 
MockExecutable::from_storage(dummy_ch, &mut gas_meter).unwrap(); + set_balance(&ALICE, min_balance * 1000); + let contract_origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new( + &contract_origin, + Some(min_balance * 100), min_balance, - vec![], - &[], - None, - ), - Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address - ); + ) + .unwrap(); - // Check that the account has not been created. - assert!(ContractInfo::::load_code_hash(&instantiated_contract_address).is_none()); - assert!(events().is_empty()); - }); + let instantiated_contract_address = assert_matches!( + MockStack::run_instantiate( + ALICE, + executable, + &mut gas_meter, + &mut storage_meter, + &schedule, + min_balance, + vec![], + &[], + None, + ), + Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address + ); + + // Check that the account has not been created. + assert!( + ContractInfo::::load_code_hash(&instantiated_contract_address).is_none() + ); + assert!(events().is_empty()); + }); } #[test] @@ -2499,51 +2663,58 @@ mod tests { } }); - ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let schedule = ::Schedule::get(); - let min_balance = ::Currency::minimum_balance(); - set_balance(&ALICE, min_balance * 100); - place_contract(&BOB, instantiator_ch); - let contract_origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new( - &contract_origin, - Some(min_balance * 10), - min_balance * 10, - ) - .unwrap(); - - assert_matches!( - MockStack::run_call( - contract_origin, - BOB, - &mut GasMeter::::new(GAS_LIMIT), - &mut storage_meter, - &schedule, + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .existential_deposit(15) + .build() + .execute_with(|| { + let schedule = ::Schedule::get(); + let min_balance = ::Currency::minimum_balance(); + set_balance(&ALICE, min_balance * 100); + place_contract(&BOB, instantiator_ch); + let 
contract_origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new( + &contract_origin, + Some(min_balance * 10), min_balance * 10, - vec![], - None, - Determinism::Enforced, - ), - Ok(_) - ); + ) + .unwrap(); - let instantiated_contract_address = - instantiated_contract_address.borrow().as_ref().unwrap().clone(); + assert_matches!( + MockStack::run_call( + contract_origin, + BOB, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + &schedule, + min_balance * 10, + vec![], + None, + Determinism::Enforced, + ), + Ok(_) + ); - // Check that the newly created account has the expected code hash and - // there are instantiation event. - assert_eq!( - ContractInfo::::load_code_hash(&instantiated_contract_address).unwrap(), - dummy_ch - ); - assert_eq!( - &events(), - &[ - Event::Instantiated { deployer: BOB, contract: instantiated_contract_address }, - Event::Called { caller: Origin::from_account_id(ALICE), contract: BOB }, - ] - ); - }); + let instantiated_contract_address = + instantiated_contract_address.borrow().as_ref().unwrap().clone(); + + // Check that the newly created account has the expected code hash and + // there are instantiation event. 
+ assert_eq!( + ContractInfo::::load_code_hash(&instantiated_contract_address).unwrap(), + dummy_ch + ); + assert_eq!( + &events(), + &[ + Event::Instantiated { + deployer: BOB, + contract: instantiated_contract_address + }, + Event::Called { caller: Origin::from_account_id(ALICE), contract: BOB }, + ] + ); + }); } #[test] @@ -2571,37 +2742,41 @@ mod tests { } }); - ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let schedule = ::Schedule::get(); - set_balance(&ALICE, 1000); - set_balance(&BOB, 100); - place_contract(&BOB, instantiator_ch); - let contract_origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&contract_origin, Some(200), 0).unwrap(); + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .existential_deposit(15) + .build() + .execute_with(|| { + let schedule = ::Schedule::get(); + set_balance(&ALICE, 1000); + set_balance(&BOB, 100); + place_contract(&BOB, instantiator_ch); + let contract_origin = Origin::from_account_id(ALICE); + let mut storage_meter = + storage::meter::Meter::new(&contract_origin, Some(200), 0).unwrap(); - assert_matches!( - MockStack::run_call( - contract_origin, - BOB, - &mut GasMeter::::new(GAS_LIMIT), - &mut storage_meter, - &schedule, - 0, - vec![], - None, - Determinism::Enforced, - ), - Ok(_) - ); + assert_matches!( + MockStack::run_call( + contract_origin, + BOB, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + &schedule, + 0, + vec![], + None, + Determinism::Enforced, + ), + Ok(_) + ); - // The contract wasn't instantiated so we don't expect to see an instantiation - // event here. - assert_eq!( - &events(), - &[Event::Called { caller: Origin::from_account_id(ALICE), contract: BOB },] - ); - }); + // The contract wasn't instantiated so we don't expect to see an instantiation + // event here. 
+ assert_eq!( + &events(), + &[Event::Called { caller: Origin::from_account_id(ALICE), contract: BOB },] + ); + }); } #[test] @@ -2611,33 +2786,37 @@ mod tests { exec_success() }); - ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let schedule = ::Schedule::get(); - let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let executable = - MockExecutable::from_storage(terminate_ch, &schedule, &mut gas_meter).unwrap(); - set_balance(&ALICE, 10_000); - let contract_origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&contract_origin, None, 100).unwrap(); + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .existential_deposit(15) + .build() + .execute_with(|| { + let schedule = ::Schedule::get(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = + MockExecutable::from_storage(terminate_ch, &mut gas_meter).unwrap(); + set_balance(&ALICE, 10_000); + let contract_origin = Origin::from_account_id(ALICE); + let mut storage_meter = + storage::meter::Meter::new(&contract_origin, None, 100).unwrap(); - assert_eq!( - MockStack::run_instantiate( - ALICE, - executable, - &mut gas_meter, - &mut storage_meter, - &schedule, - 100, - vec![], - &[], - None, - ), - Err(Error::::TerminatedInConstructor.into()) - ); + assert_eq!( + MockStack::run_instantiate( + ALICE, + executable, + &mut gas_meter, + &mut storage_meter, + &schedule, + 100, + vec![], + &[], + None, + ), + Err(Error::::TerminatedInConstructor.into()) + ); - assert_eq!(&events(), &[]); - }); + assert_eq!(&events(), &[]); + }); } #[test] @@ -2713,29 +2892,32 @@ mod tests { }); // This one tests passing the input data into a contract via instantiate. 
- ExtBuilder::default().build().execute_with(|| { - let schedule = ::Schedule::get(); - let min_balance = ::Currency::minimum_balance(); - let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let executable = MockExecutable::from_storage(code, &schedule, &mut gas_meter).unwrap(); - set_balance(&ALICE, min_balance * 10_000); - let contract_origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&contract_origin, None, min_balance).unwrap(); - - let result = MockStack::run_instantiate( - ALICE, - executable, - &mut gas_meter, - &mut storage_meter, - &schedule, - min_balance, - vec![], - &[], - None, - ); - assert_matches!(result, Ok(_)); - }); + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .build() + .execute_with(|| { + let schedule = ::Schedule::get(); + let min_balance = ::Currency::minimum_balance(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = MockExecutable::from_storage(code, &mut gas_meter).unwrap(); + set_balance(&ALICE, min_balance * 10_000); + let contract_origin = Origin::from_account_id(ALICE); + let mut storage_meter = + storage::meter::Meter::new(&contract_origin, None, min_balance).unwrap(); + + let result = MockStack::run_instantiate( + ALICE, + executable, + &mut gas_meter, + &mut storage_meter, + &schedule, + min_balance, + vec![], + &[], + None, + ); + assert_matches!(result, Ok(_)); + }); } #[test] @@ -3137,76 +3319,79 @@ mod tests { exec_success() }); - ExtBuilder::default().build().execute_with(|| { - let schedule = ::Schedule::get(); - let min_balance = ::Currency::minimum_balance(); - let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let fail_executable = - MockExecutable::from_storage(fail_code, &schedule, &mut gas_meter).unwrap(); - let success_executable = - MockExecutable::from_storage(success_code, &schedule, &mut gas_meter).unwrap(); - let succ_fail_executable = - MockExecutable::from_storage(succ_fail_code, &schedule, &mut gas_meter).unwrap(); - let 
succ_succ_executable = - MockExecutable::from_storage(succ_succ_code, &schedule, &mut gas_meter).unwrap(); - set_balance(&ALICE, min_balance * 10_000); - let contract_origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&contract_origin, None, min_balance * 100).unwrap(); + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .build() + .execute_with(|| { + let schedule = ::Schedule::get(); + let min_balance = ::Currency::minimum_balance(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let fail_executable = + MockExecutable::from_storage(fail_code, &mut gas_meter).unwrap(); + let success_executable = + MockExecutable::from_storage(success_code, &mut gas_meter).unwrap(); + let succ_fail_executable = + MockExecutable::from_storage(succ_fail_code, &mut gas_meter).unwrap(); + let succ_succ_executable = + MockExecutable::from_storage(succ_succ_code, &mut gas_meter).unwrap(); + set_balance(&ALICE, min_balance * 10_000); + let contract_origin = Origin::from_account_id(ALICE); + let mut storage_meter = + storage::meter::Meter::new(&contract_origin, None, min_balance * 100).unwrap(); - MockStack::run_instantiate( - ALICE, - fail_executable, - &mut gas_meter, - &mut storage_meter, - &schedule, - min_balance * 100, - vec![], - &[], - None, - ) - .ok(); - assert_eq!(>::get(), 0); + MockStack::run_instantiate( + ALICE, + fail_executable, + &mut gas_meter, + &mut storage_meter, + &schedule, + min_balance * 100, + vec![], + &[], + None, + ) + .ok(); + assert_eq!(>::get(), 0); - assert_ok!(MockStack::run_instantiate( - ALICE, - success_executable, - &mut gas_meter, - &mut storage_meter, - &schedule, - min_balance * 100, - vec![], - &[], - None, - )); - assert_eq!(>::get(), 1); + assert_ok!(MockStack::run_instantiate( + ALICE, + success_executable, + &mut gas_meter, + &mut storage_meter, + &schedule, + min_balance * 100, + vec![], + &[], + None, + )); + assert_eq!(>::get(), 1); - 
assert_ok!(MockStack::run_instantiate( - ALICE, - succ_fail_executable, - &mut gas_meter, - &mut storage_meter, - &schedule, - min_balance * 200, - vec![], - &[], - None, - )); - assert_eq!(>::get(), 2); + assert_ok!(MockStack::run_instantiate( + ALICE, + succ_fail_executable, + &mut gas_meter, + &mut storage_meter, + &schedule, + min_balance * 200, + vec![], + &[], + None, + )); + assert_eq!(>::get(), 2); - assert_ok!(MockStack::run_instantiate( - ALICE, - succ_succ_executable, - &mut gas_meter, - &mut storage_meter, - &schedule, - min_balance * 200, - vec![], - &[], - None, - )); - assert_eq!(>::get(), 4); - }); + assert_ok!(MockStack::run_instantiate( + ALICE, + succ_succ_executable, + &mut gas_meter, + &mut storage_meter, + &schedule, + min_balance * 200, + vec![], + &[], + None, + )); + assert_eq!(>::get(), 4); + }); } #[test] @@ -3607,7 +3792,9 @@ mod tests { ); assert_eq!( ctx.ext.ecdsa_to_eth_address(&pubkey_compressed).unwrap(), - array_bytes::hex2array_unchecked::<20>("09231da7b19A016f9e576d23B16277062F4d46A8") + array_bytes::hex2array_unchecked::<_, 20>( + "09231da7b19A016f9e576d23B16277062F4d46A8" + ) ); exec_success() }); @@ -3674,26 +3861,30 @@ mod tests { exec_success() }); - ExtBuilder::default().build().execute_with(|| { - let min_balance = ::Currency::minimum_balance(); - let schedule = ::Schedule::get(); - let mut gas_meter = GasMeter::::new(GAS_LIMIT); - set_balance(&ALICE, min_balance * 1000); - place_contract(&BOB, code_hash); - let contract_origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&contract_origin, None, 0).unwrap(); - assert_ok!(MockStack::run_call( - contract_origin, - BOB, - &mut gas_meter, - &mut storage_meter, - &schedule, - 0, - vec![], - None, - Determinism::Enforced - )); - }); + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .build() + .execute_with(|| { + let min_balance = ::Currency::minimum_balance(); + let schedule = ::Schedule::get(); + let mut 
gas_meter = GasMeter::::new(GAS_LIMIT); + set_balance(&ALICE, min_balance * 1000); + place_contract(&BOB, code_hash); + let contract_origin = Origin::from_account_id(ALICE); + let mut storage_meter = + storage::meter::Meter::new(&contract_origin, None, 0).unwrap(); + assert_ok!(MockStack::run_call( + contract_origin, + BOB, + &mut gas_meter, + &mut storage_meter, + &schedule, + 0, + vec![], + None, + Determinism::Enforced + )); + }); } /// This works even though random interface is deprecated, as the check to ban deprecated diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index f6484fbcf4630..7d17642d92e54 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -23,6 +23,7 @@ use frame_support::{ weights::Weight, DefaultNoBound, }; +use sp_core::Get; use sp_runtime::traits::Zero; use sp_std::marker::PhantomData; @@ -80,6 +81,8 @@ pub struct GasMeter { gas_left: Weight, /// Due to `adjust_gas` and `nested` the `gas_left` can temporarily dip below its final value. gas_left_lowest: Weight, + /// Amount of fuel consumed by the engine from the last host function call. + engine_consumed: u64, _phantom: PhantomData, #[cfg(test)] tokens: Vec, @@ -91,6 +94,7 @@ impl GasMeter { gas_limit, gas_left: gas_limit, gas_left_lowest: gas_limit, + engine_consumed: Default::default(), _phantom: PhantomData, #[cfg(test)] tokens: Vec::new(), @@ -151,7 +155,7 @@ impl GasMeter { /// Amount is calculated by the given `token`. /// /// Returns `OutOfGas` if there is not enough gas or addition of the specified - /// amount of gas has lead to overflow. On success returns `Proceed`. + /// amount of gas has lead to overflow. /// /// NOTE that amount isn't consumed if there is not enough gas. This is considered /// safe because we always charge gas before performing any resource-spending action. 
@@ -181,17 +185,45 @@ impl GasMeter { self.gas_left = self.gas_left.saturating_add(adjustment).min(self.gas_limit); } + /// This method is used for gas syncs with the engine. + /// + /// Updates internal `engine_comsumed` tracker of engine fuel consumption. + /// + /// Charges self with the `ref_time` Weight corresponding to wasmi fuel consumed on the engine + /// side since last sync. Passed value is scaled by multiplying it by the weight of a basic + /// operation, as such an operation in wasmi engine costs 1. + /// + /// Returns the updated `gas_left` `Weight` value from the meter. + /// Normally this would never fail, as engine should fail first when out of gas. + pub fn charge_fuel(&mut self, wasmi_fuel_total: u64) -> Result { + // Take the part consumed since the last update. + let wasmi_fuel = wasmi_fuel_total.saturating_sub(self.engine_consumed); + if !wasmi_fuel.is_zero() { + self.engine_consumed = wasmi_fuel_total; + let reftime_consumed = + wasmi_fuel.saturating_mul(T::Schedule::get().instruction_weights.base as u64); + let ref_time_left = self + .gas_left + .ref_time() + .checked_sub(reftime_consumed) + .ok_or_else(|| Error::::OutOfGas)?; + + *(self.gas_left.ref_time_mut()) = ref_time_left; + } + Ok(self.gas_left) + } + /// Returns the amount of gas that is required to run the same call. /// /// This can be different from `gas_spent` because due to `adjust_gas` the amount of /// spent gas can temporarily drop and be refunded later. pub fn gas_required(&self) -> Weight { - self.gas_limit - self.gas_left_lowest() + self.gas_limit.saturating_sub(self.gas_left_lowest()) } /// Returns how much gas was spent pub fn gas_consumed(&self) -> Weight { - self.gas_limit - self.gas_left + self.gas_limit.saturating_sub(self.gas_left) } /// Returns how much gas left from the initial budget. 
diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index e0e8e2d15b06d..37b79c2d585ad 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -25,12 +25,12 @@ //! //! ## Overview //! -//! This module extends accounts based on the [`Currency`] trait to have smart-contract -//! functionality. It can be used with other modules that implement accounts based on [`Currency`]. -//! These "smart-contract accounts" have the ability to instantiate smart-contracts and make calls -//! to other contract and non-contract accounts. +//! This module extends accounts based on the [`frame_support::traits::fungible`] traits to have +//! smart-contract functionality. It can be used with other modules that implement accounts based on +//! the [`frame_support::traits::fungible`] traits. These "smart-contract accounts" have the ability +//! to instantiate smart-contracts and make calls to other contract and non-contract accounts. //! -//! The smart-contract code is stored once in a code cache, and later retrievable via its hash. +//! The smart-contract code is stored once, and later retrievable via its hash. //! This means that multiple smart-contracts can be instantiated from the same hash, without //! replicating the code each time. //! @@ -41,14 +41,14 @@ //! Finally, when an account is reaped, its associated code and storage of the smart-contract //! account will also be deleted. //! -//! ### Gas +//! ### Weight //! -//! Senders must specify a gas limit with every call, as all instructions invoked by the -//! smart-contract require gas. Unused gas is refunded after the call, regardless of the execution -//! outcome. +//! Senders must specify a [`Weight`] limit with every call, as all instructions invoked by the +//! smart-contract require weight. Unused weight is refunded after the call, regardless of the +//! execution outcome. //! -//! If the gas limit is reached, then all calls and state changes (including balance transfers) are -//! 
only reverted at the current call's contract level. For example, if contract A calls B and B +//! If the weight limit is reached, then all calls and state changes (including balance transfers) +//! are only reverted at the current call's contract level. For example, if contract A calls B and B //! runs out of gas mid-call, then all of B's calls are reverted. Assuming correct error handling by //! contract A, A's other calls and state changes still persist. //! @@ -63,23 +63,27 @@ //! //! ### Dispatchable functions //! -//! * [`Pallet::instantiate_with_code`] - Deploys a new contract from the supplied wasm binary, +//! * [`Pallet::instantiate_with_code`] - Deploys a new contract from the supplied Wasm binary, //! optionally transferring //! some balance. This instantiates a new smart contract account with the supplied code and //! calls its constructor to initialize the contract. //! * [`Pallet::instantiate`] - The same as `instantiate_with_code` but instead of uploading new //! code an existing `code_hash` is supplied. //! * [`Pallet::call`] - Makes a call to an account, optionally transferring some balance. +//! * [`Pallet::upload_code`] - Uploads new code without instantiating a contract from it. +//! * [`Pallet::remove_code`] - Removes the stored code and refunds the deposit to its owner. Only +//! allowed to code owner. +//! * [`Pallet::set_code`] - Changes the code of an existing contract. Only allowed to `Root` +//! origin. +//! * [`Pallet::migrate`] - Runs migration steps of current multi-block migration in priority, +//! before [`Hooks::on_idle`][frame_support::traits::Hooks::on_idle] activates. //! //! ## Usage //! -//! The Contracts module is a work in progress. The following examples show how this module -//! can be used to instantiate and call contracts. -//! -//! * [`ink!`](https://use.ink) is -//! an [`eDSL`](https://wiki.haskell.org/Embedded_domain_specific_language) that enables writing -//! 
WebAssembly based smart contracts in the Rust programming language. +//! * [`ink!`](https://use.ink) is language that enables writing Wasm-based smart contracts in plain +//! Rust. +#![allow(rustdoc::private_intra_doc_links)] #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(feature = "runtime-benchmarks", recursion_limit = "1024")] @@ -87,12 +91,13 @@ mod address; mod benchmarking; mod exec; mod gas; -mod migration; mod schedule; mod storage; mod wasm; pub mod chain_extension; +pub mod migration; +pub mod unsafe_debug; pub mod weights; #[cfg(test)] @@ -101,41 +106,44 @@ use crate::{ exec::{AccountIdOf, ErrorOrigin, ExecError, Executable, Key, Stack as ExecStack}, gas::GasMeter, storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager}, - wasm::{OwnerInfo, PrefabWasmModule, TryInstantiate}, - weights::WeightInfo, + wasm::{CodeInfo, WasmBlob}, }; -use codec::{Codec, Decode, Encode, HasCompact}; +use codec::{Codec, Decode, Encode, HasCompact, MaxEncodedLen}; use environmental::*; use frame_support::{ - dispatch::{DispatchError, Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo, RawOrigin}, + dispatch::{ + DispatchError, Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo, RawOrigin, + WithPostDispatchInfo, + }, ensure, error::BadOrigin, traits::{ - tokens::fungible::Inspect, ConstU32, Contains, Currency, Get, Randomness, - ReservableCurrency, Time, + fungible::{Inspect, Mutate, MutateHold}, + ConstU32, Contains, Get, Randomness, Time, }, weights::Weight, - BoundedVec, RuntimeDebugNoBound, WeakBoundedVec, + BoundedVec, RuntimeDebug, RuntimeDebugNoBound, }; use frame_system::{ensure_signed, pallet_prelude::OriginFor, EventRecord, Pallet as System}; use pallet_contracts_primitives::{ Code, CodeUploadResult, CodeUploadReturnValue, ContractAccessError, ContractExecResult, - ContractInstantiateResult, ExecReturnValue, GetStorageResult, InstantiateReturnValue, - StorageDeposit, + ContractInstantiateResult, ContractResult, ExecReturnValue, 
GetStorageResult, + InstantiateReturnValue, StorageDeposit, }; use scale_info::TypeInfo; use smallvec::Array; -use sp_runtime::traits::{Convert, Hash, Saturating, StaticLookup}; -use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; +use sp_runtime::traits::{Convert, Hash, Saturating, StaticLookup, Zero}; +use sp_std::{fmt::Debug, prelude::*}; pub use crate::{ address::{AddressGenerator, DefaultAddressGenerator}, exec::Frame, - migration::Migration, + migration::{MigrateSequence, Migration, NoopMigration}, pallet::*, schedule::{HostFnWeights, InstructionWeights, Limits, Schedule}, wasm::Determinism, }; +pub use weights::WeightInfo; #[cfg(doc)] pub use crate::wasm::api_doc; @@ -143,9 +151,8 @@ pub use crate::wasm::api_doc; type CodeHash = ::Hash; type TrieId = BoundedVec>; type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Inspect<::AccountId>>::Balance; type CodeVec = BoundedVec::MaxCodeLen>; -type RelaxedCodeVec = WeakBoundedVec::MaxCodeLen>; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; type DebugBufferVec = BoundedVec::MaxDebugBufferLen>; type EventRecordOf = @@ -177,13 +184,14 @@ pub mod pallet { use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + use sp_runtime::Perbill; /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(9); + pub(crate) const STORAGE_VERSION: StorageVersion = StorageVersion::new(14); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::config] pub trait Config: frame_system::Config { @@ -198,11 +206,12 @@ pub mod pallet { /// be instantiated from existing codes that use this deprecated functionality. It will /// be removed eventually. Hence for new `pallet-contracts` deployments it is okay /// to supply a dummy implementation for this type (because it is never used). 
- type Randomness: Randomness; + type Randomness: Randomness>; - /// The currency in which fees are paid and contract balances are held. - type Currency: ReservableCurrency // TODO: Move to fungible traits - + Inspect>; + /// The fungible in which fees are paid and contract balances are held. + type Currency: Inspect + + Mutate + + MutateHold; /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -279,12 +288,17 @@ pub mod pallet { #[pallet::constant] type DepositPerItem: Get>; + /// The percentage of the storage deposit that should be held for using a code hash. + /// Instantiating a contract, or calling [`chain_extension::Ext::add_delegate_dependency`] + /// protects the code from being removed. In order to prevent abuse these actions are + /// protected with a percentage of the code deposit. + #[pallet::constant] + type CodeHashLockupDepositPercent: Get; + /// The address generator used to generate the addresses of contracts. type AddressGenerator: AddressGenerator; - /// The maximum length of a contract code in bytes. This limit applies to the instrumented - /// version of the code. Therefore `instantiate_with_code` can fail even when supplying - /// a wasm binary below this maximum size. + /// The maximum length of a contract code in bytes. /// /// The value should be chosen carefully taking into the account the overall memory limit /// your runtime has, as well as the [maximum allowed callstack @@ -296,6 +310,11 @@ pub mod pallet { #[pallet::constant] type MaxStorageKeyLen: Get; + /// The maximum number of delegate_dependencies that a contract can lock with + /// [`chain_extension::Ext::add_delegate_dependency`]. + #[pallet::constant] + type MaxDelegateDependencies: Get; + /// Make contract callable functions marked as `#[unstable]` available. /// /// Contracts that use `#[unstable]` functions won't be able to be uploaded unless @@ -311,16 +330,66 @@ pub mod pallet { /// The maximum length of the debug buffer in bytes. 
#[pallet::constant] type MaxDebugBufferLen: Get; + + /// Overarching hold reason. + type RuntimeHoldReason: From; + + /// The sequence of migration steps that will be applied during a migration. + /// + /// # Examples + /// ``` + /// use pallet_contracts::migration::{v10, v11}; + /// # struct Runtime {}; + /// # struct Currency {}; + /// type Migrations = (v10::Migration, v11::Migration); + /// ``` + /// + /// If you have a single migration step, you can use a tuple with a single element: + /// ``` + /// use pallet_contracts::migration::v10; + /// # struct Runtime {}; + /// # struct Currency {}; + /// type Migrations = (v10::Migration,); + /// ``` + type Migrations: MigrateSequence; + + /// Type that provides debug handling for the contract execution process. + /// + /// # Warning + /// + /// Do **not** use it in a production environment or for benchmarking purposes. + #[cfg(feature = "unsafe-debug")] + type Debug: unsafe_debug::UnsafeDebug; } #[pallet::hooks] impl Hooks> for Pallet { - fn on_idle(_block: T::BlockNumber, remaining_weight: Weight) -> Weight { + fn on_idle(_block: BlockNumberFor, mut remaining_weight: Weight) -> Weight { + use migration::MigrateResult::*; + + loop { + let (result, weight) = Migration::::migrate(remaining_weight); + remaining_weight.saturating_reduce(weight); + + match result { + // There is not enough weight to perform a migration, or make any progress, we + // just return the remaining weight. + NoMigrationPerformed | InProgress { steps_done: 0 } => return remaining_weight, + // Migration is still in progress, we can start the next step. + InProgress { .. } => continue, + // Either no migration is in progress, or we are done with all migrations, we + // can do some more other work with the remaining weight. 
+ Completed | NoMigrationInProgress => break, + } + } + ContractInfo::::process_deletion_queue_batch(remaining_weight) .saturating_add(T::WeightInfo::on_process_deletion_queue_batch()) } fn integrity_test() { + Migration::::integrity_test(); + // Total runtime memory limit let max_runtime_mem: u32 = T::Schedule::get().limits.runtime_memory; // Memory limits for a single contract: @@ -334,19 +403,19 @@ pub mod pallet { // Check that given configured `MaxCodeLen`, runtime heap memory limit can't be broken. // - // In worst case, the decoded wasm contract code would be `x16` times larger than the + // In worst case, the decoded Wasm contract code would be `x16` times larger than the // encoded one. This is because even a single-byte wasm instruction has 16-byte size in // wasmi. This gives us `MaxCodeLen*16` safety margin. // - // Next, the pallet keeps both the original and instrumented wasm blobs for each - // contract, hence we add up `MaxCodeLen*2` more to the safety margin. + // Next, the pallet keeps the Wasm blob for each + // contract, hence we add up `MaxCodeLen` to the safety margin. // // Finally, the inefficiencies of the freeing-bump allocator // being used in the client for the runtime memory allocations, could lead to possible // memory allocations for contract code grow up to `x4` times in some extreme cases, - // which gives us total multiplier of `18*4` for `MaxCodeLen`. + // which gives us total multiplier of `17*4` for `MaxCodeLen`. // - // That being said, for every contract executed in runtime, at least `MaxCodeLen*18*4` + // That being said, for every contract executed in runtime, at least `MaxCodeLen*17*4` // memory should be available. Note that maximum allowed heap memory and stack size per // each contract (stack frame) should also be counted. 
// @@ -355,7 +424,7 @@ pub mod pallet { // // This gives us the following formula: // - // `(MaxCodeLen * 18 * 4 + MAX_STACK_SIZE + max_heap_size) * max_call_depth < + // `(MaxCodeLen * 17 * 4 + MAX_STACK_SIZE + max_heap_size) * max_call_depth < // max_runtime_mem/2` // // Hence the upper limit for the `MaxCodeLen` can be defined as follows: @@ -364,7 +433,7 @@ pub mod pallet { .saturating_div(max_call_depth) .saturating_sub(max_heap_size) .saturating_sub(MAX_STACK_SIZE) - .saturating_div(18 * 4); + .saturating_div(17 * 4); assert!( T::MaxCodeLen::get() < code_len_limit, @@ -475,7 +544,7 @@ pub mod pallet { /// /// If the code does not already exist a deposit is reserved from the caller /// and unreserved only when [`Self::remove_code`] is called. The size of the reserve - /// depends on the instrumented size of the the supplied `code`. + /// depends on the size of the supplied `code`. /// /// If the code already exists in storage it will still return `Ok` and upgrades /// the in storage version to the current @@ -499,6 +568,7 @@ pub mod pallet { storage_deposit_limit: Option< as codec::HasCompact>::Type>, determinism: Determinism, ) -> DispatchResult { + Migration::::ensure_migrated()?; let origin = ensure_signed(origin)?; Self::bare_upload_code(origin, code, storage_deposit_limit.map(Into::into), determinism) .map(|_| ()) @@ -514,8 +584,9 @@ pub mod pallet { origin: OriginFor, code_hash: CodeHash, ) -> DispatchResultWithPostInfo { + Migration::::ensure_migrated()?; let origin = ensure_signed(origin)?; - >::remove(&origin, code_hash)?; + >::remove(&origin, code_hash)?; // we waive the fee because removing unused code is beneficial Ok(Pays::No.into()) } @@ -537,6 +608,7 @@ pub mod pallet { dest: AccountIdLookupOf, code_hash: CodeHash, ) -> DispatchResult { + Migration::::ensure_migrated()?; ensure_root(origin)?; let dest = T::Lookup::lookup(dest)?; >::try_mutate(&dest, |contract| { @@ -545,8 +617,8 @@ pub mod pallet { } else { return Err(>::ContractNotFound.into()) 
}; - >::add_user(code_hash)?; - >::remove_user(contract.code_hash); + >::increment_refcount(code_hash)?; + >::decrement_refcount(contract.code_hash); Self::deposit_event( vec![T::Hashing::hash_of(&dest), code_hash, contract.code_hash], Event::ContractCodeUpdated { @@ -586,6 +658,7 @@ pub mod pallet { storage_deposit_limit: Option< as codec::HasCompact>::Type>, data: Vec, ) -> DispatchResultWithPostInfo { + Migration::::ensure_migrated()?; let common = CommonInput { origin: Origin::from_runtime_origin(origin)?, value, @@ -624,8 +697,7 @@ pub mod pallet { /// /// Instantiation is executed as follows: /// - /// - The supplied `code` is instrumented, deployed, and a `code_hash` is created for that - /// code. + /// - The supplied `code` is deployed, and a `code_hash` is created for that code. /// - If the `code_hash` already exists on the chain the underlying `code` will be shared. /// - The destination address is computed based on the sender, code_hash and the salt. /// - The smart-contract account is created at the computed address. @@ -645,26 +717,43 @@ pub mod pallet { data: Vec, salt: Vec, ) -> DispatchResultWithPostInfo { + Migration::::ensure_migrated()?; + let origin = ensure_signed(origin)?; let code_len = code.len() as u32; + + let (module, upload_deposit) = Self::try_upload_code( + origin.clone(), + code, + storage_deposit_limit.clone().map(Into::into), + Determinism::Enforced, + None, + )?; + + // Reduces the storage deposit limit by the amount that was reserved for the upload. 
+ let storage_deposit_limit = + storage_deposit_limit.map(|limit| limit.into().saturating_sub(upload_deposit)); + let data_len = data.len() as u32; let salt_len = salt.len() as u32; let common = CommonInput { - origin: Origin::from_runtime_origin(origin)?, + origin: Origin::from_account_id(origin), value, data, gas_limit, - storage_deposit_limit: storage_deposit_limit.map(Into::into), + storage_deposit_limit, debug_message: None, }; + let mut output = - InstantiateInput:: { code: Code::Upload(code), salt }.run_guarded(common); + InstantiateInput:: { code: WasmCode::Wasm(module), salt }.run_guarded(common); if let Ok(retval) = &output.result { if retval.1.did_revert() { output.result = Err(>::ContractReverted.into()); } } + output.gas_meter.into_dispatch_result( - output.result.map(|(_address, result)| result), + output.result.map(|(_address, output)| output), T::WeightInfo::instantiate_with_code(code_len, data_len, salt_len), ) } @@ -687,6 +776,7 @@ pub mod pallet { data: Vec, salt: Vec, ) -> DispatchResultWithPostInfo { + Migration::::ensure_migrated()?; let data_len = data.len() as u32; let salt_len = salt.len() as u32; let common = CommonInput { @@ -697,8 +787,8 @@ pub mod pallet { storage_deposit_limit: storage_deposit_limit.map(Into::into), debug_message: None, }; - let mut output = - InstantiateInput:: { code: Code::Existing(code_hash), salt }.run_guarded(common); + let mut output = InstantiateInput:: { code: WasmCode::CodeHash(code_hash), salt } + .run_guarded(common); if let Ok(retval) = &output.result { if retval.1.did_revert() { output.result = Err(>::ContractReverted.into()); @@ -709,6 +799,33 @@ pub mod pallet { T::WeightInfo::instantiate(data_len, salt_len), ) } + + /// When a migration is in progress, this dispatchable can be used to run migration steps. + /// Calls that contribute to advancing the migration have their fees waived, as it's helpful + /// for the chain. 
Note that while the migration is in progress, the pallet will also + /// leverage the `on_idle` hooks to run migration steps. + #[pallet::call_index(9)] + #[pallet::weight(T::WeightInfo::migrate().saturating_add(*weight_limit))] + pub fn migrate(origin: OriginFor, weight_limit: Weight) -> DispatchResultWithPostInfo { + use migration::MigrateResult::*; + ensure_signed(origin)?; + + let weight_limit = weight_limit.saturating_add(T::WeightInfo::migrate()); + let (result, weight) = Migration::::migrate(weight_limit); + + match result { + Completed => + Ok(PostDispatchInfo { actual_weight: Some(weight), pays_fee: Pays::No }), + InProgress { steps_done, .. } if steps_done > 0 => + Ok(PostDispatchInfo { actual_weight: Some(weight), pays_fee: Pays::No }), + InProgress { .. } => + Ok(PostDispatchInfo { actual_weight: Some(weight), pays_fee: Pays::Yes }), + NoMigrationInProgress | NoMigrationPerformed => { + let err: DispatchError = >::NoMigrationPerformed.into(); + Err(err.with_weight(T::WeightInfo::migrate())) + }, + } + } } #[pallet::event] @@ -730,7 +847,7 @@ pub mod pallet { }, /// Code with the specified hash has been stored. - CodeStored { code_hash: T::Hash }, + CodeStored { code_hash: T::Hash, deposit_held: BalanceOf, uploader: T::AccountId }, /// A custom event emitted by the contract. ContractEmitted { @@ -742,7 +859,7 @@ pub mod pallet { }, /// A code with the specified hash was removed. - CodeRemoved { code_hash: T::Hash }, + CodeRemoved { code_hash: T::Hash, deposit_released: BalanceOf, remover: T::AccountId }, /// A contract's code was updated. ContractCodeUpdated { @@ -786,8 +903,8 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// A new schedule must have a greater version than the current one. - InvalidScheduleVersion, + /// Invalid schedule supplied, e.g. with zero weight of a basic operation. + InvalidSchedule, /// Invalid combination of flags supplied to `seal_call` or `seal_delegate_call`. 
InvalidCallFlags, /// The executed contract exhausted its gas limit. @@ -807,6 +924,8 @@ pub mod pallet { CodeTooLarge, /// No code could be found at the supplied code hash. CodeNotFound, + /// No code info could be found at the supplied code hash. + CodeInfoNotFound, /// A buffer outside of sandbox memory was passed to a contract API function. OutOfBounds, /// Input passed to a contract API function failed to decode as expected type. @@ -850,7 +969,7 @@ pub mod pallet { /// or via RPC an `Ok` will be returned. In this case the caller needs to inspect the flags /// to determine whether a reversion has taken place. ContractReverted, - /// The contract's code was found to be invalid during validation or instrumentation. + /// The contract's code was found to be invalid during validation. /// /// The most likely cause of this is that an API was used which is not supported by the /// node. This happens if an older node is used with a new version of ink!. Try updating @@ -861,20 +980,34 @@ pub mod pallet { CodeRejected, /// An indetermistic code was used in a context where this is not permitted. Indeterministic, + /// A pending migration needs to complete before the extrinsic can be called. + MigrationInProgress, + /// Migrate dispatch call was attempted but no migration was performed. + NoMigrationPerformed, + /// The contract has reached its maximum number of delegate dependencies. + MaxDelegateDependenciesReached, + /// The dependency was not found in the contract's delegate dependencies. + DelegateDependencyNotFound, + /// The contract already depends on the given delegate dependency. + DelegateDependencyAlreadyExists, + /// Can not add a delegate dependency to the code hash of the contract itself. + CannotAddSelfAsDelegateDependency, } - /// A mapping from an original code hash to the original code, untouched by instrumentation. 
- #[pallet::storage] - pub(crate) type PristineCode = StorageMap<_, Identity, CodeHash, CodeVec>; + /// A reason for the pallet contracts placing a hold on funds. + #[pallet::composite_enum] + pub enum HoldReason { + /// The Pallet has reserved it for storing code on-chain. + CodeUploadDepositReserve, + } - /// A mapping between an original code hash and instrumented wasm code, ready for execution. + /// A mapping from a contract's code hash to its code. #[pallet::storage] - pub(crate) type CodeStorage = - StorageMap<_, Identity, CodeHash, PrefabWasmModule>; + pub(crate) type PristineCode = StorageMap<_, Identity, CodeHash, CodeVec>; - /// A mapping between an original code hash and its owner information. + /// A mapping from a contract's code hash to its code info. #[pallet::storage] - pub(crate) type OwnerInfoOf = StorageMap<_, Identity, CodeHash, OwnerInfo>; + pub(crate) type CodeInfoOf = StorageMap<_, Identity, CodeHash, CodeInfo>; /// This is a **monotonic** counter incremented on contract instantiation. /// @@ -920,6 +1053,12 @@ pub mod pallet { #[pallet::storage] pub(crate) type DeletionQueueCounter = StorageValue<_, DeletionQueueManager, ValueQuery>; + + /// A migration can span across multiple blocks. This storage defines a cursor to track the + /// progress of the migration, enabling us to resume from the last completed position. + #[pallet::storage] + pub(crate) type MigrationInProgress = + StorageValue<_, migration::Cursor, OptionQuery>; } /// The type of origins supported by the contracts pallet. @@ -967,14 +1106,22 @@ struct CallInput { determinism: Determinism, } +/// Reference to an existing code hash or a new wasm module. +enum WasmCode { + Wasm(WasmBlob), + CodeHash(CodeHash), +} + /// Input specific to a contract instantiation invocation. struct InstantiateInput { - code: Code>, + code: WasmCode, salt: Vec, } /// Determines whether events should be collected during execution. 
-#[derive(PartialEq)] +#[derive( + Copy, Clone, PartialEq, Eq, RuntimeDebug, Decode, Encode, MaxEncodedLen, scale_info::TypeInfo, +)] pub enum CollectEvents { /// Collect events. /// @@ -990,7 +1137,9 @@ pub enum CollectEvents { } /// Determines whether debug messages will be collected. -#[derive(PartialEq)] +#[derive( + Copy, Clone, PartialEq, Eq, RuntimeDebug, Decode, Encode, MaxEncodedLen, scale_info::TypeInfo, +)] pub enum DebugInfo { /// Collect debug messages. /// # Note @@ -1014,7 +1163,7 @@ struct InternalOutput { /// Helper trait to wrap contract execution entry points into a single function /// [`Invokable::run_guarded`]. -trait Invokable { +trait Invokable: Sized { /// What is returned as a result of a successful invocation. type Output; @@ -1026,7 +1175,7 @@ trait Invokable { /// /// We enforce a re-entrancy guard here by initializing and checking a boolean flag through a /// global reference. - fn run_guarded(&self, common: CommonInput) -> InternalOutput { + fn run_guarded(self, common: CommonInput) -> InternalOutput { // Set up a global reference to the boolean flag used for the re-entrancy guard. environmental!(executing_contract: bool); @@ -1074,11 +1223,8 @@ trait Invokable { /// contract or a instantiation of a new one. /// /// Called by dispatchables and public functions through the [`Invokable::run_guarded`]. - fn run( - &self, - common: CommonInput, - gas_meter: GasMeter, - ) -> InternalOutput; + fn run(self, common: CommonInput, gas_meter: GasMeter) + -> InternalOutput; /// This method ensures that the given `origin` is allowed to invoke the current `Invokable`. 
/// @@ -1090,7 +1236,7 @@ impl Invokable for CallInput { type Output = ExecReturnValue; fn run( - &self, + self, common: CommonInput, mut gas_meter: GasMeter, ) -> InternalOutput { @@ -1107,7 +1253,7 @@ impl Invokable for CallInput { }, }; let schedule = T::Schedule::get(); - let result = ExecStack::>::run_call( + let result = ExecStack::>::run_call( origin.clone(), dest.clone(), &mut gas_meter, @@ -1116,7 +1262,7 @@ impl Invokable for CallInput { value, data.clone(), debug_message, - *determinism, + determinism, ); match storage_meter.try_into_deposit(&origin) { @@ -1138,8 +1284,8 @@ impl Invokable for InstantiateInput { type Output = (AccountIdOf, ExecReturnValue); fn run( - &self, - mut common: CommonInput, + self, + common: CommonInput, mut gas_meter: GasMeter, ) -> InternalOutput { let mut storage_deposit = Default::default(); @@ -1148,41 +1294,17 @@ impl Invokable for InstantiateInput { let InstantiateInput { salt, .. } = self; let CommonInput { origin: contract_origin, .. } = common; let origin = contract_origin.account_id()?; - let (extra_deposit, executable) = match &self.code { - Code::Upload(binary) => { - let executable = PrefabWasmModule::from_code( - binary.clone(), - &schedule, - origin.clone(), - Determinism::Enforced, - TryInstantiate::Skip, - ) - .map_err(|(err, msg)| { - common - .debug_message - .as_mut() - .map(|buffer| buffer.try_extend(&mut msg.bytes())); - err - })?; - // The open deposit will be charged during execution when the - // uploaded module does not already exist. This deposit is not part of the - // storage meter because it is not transferred to the contract but - // reserved on the uploading account. 
- (executable.open_deposit(), executable) - }, - Code::Existing(hash) => ( - Default::default(), - PrefabWasmModule::from_storage(*hash, &schedule, &mut gas_meter)?, - ), + + let executable = match self.code { + WasmCode::Wasm(module) => module, + WasmCode::CodeHash(code_hash) => WasmBlob::from_storage(code_hash, &mut gas_meter)?, }; + let contract_origin = Origin::from_account_id(origin.clone()); - let mut storage_meter = StorageMeter::new( - &contract_origin, - common.storage_deposit_limit, - common.value.saturating_add(extra_deposit), - )?; + let mut storage_meter = + StorageMeter::new(&contract_origin, common.storage_deposit_limit, common.value)?; let CommonInput { value, data, debug_message, .. } = common; - let result = ExecStack::>::run_instantiate( + let result = ExecStack::>::run_instantiate( origin.clone(), executable, &mut gas_meter, @@ -1194,9 +1316,7 @@ impl Invokable for InstantiateInput { debug_message, ); - storage_deposit = storage_meter - .try_into_deposit(&contract_origin)? - .saturating_add(&StorageDeposit::Charge(extra_deposit)); + storage_deposit = storage_meter.try_into_deposit(&contract_origin)?; result }; InternalOutput { result: try_exec(), gas_meter, storage_deposit } @@ -1210,6 +1330,21 @@ impl Invokable for InstantiateInput { } } +macro_rules! ensure_no_migration_in_progress { + () => { + if Migration::::in_progress() { + return ContractResult { + gas_consumed: Zero::zero(), + gas_required: Zero::zero(), + storage_deposit: Default::default(), + debug_message: Vec::new(), + result: Err(Error::::MigrationInProgress.into()), + events: None, + } + } + }; +} + impl Pallet { /// Perform a call to a specified contract. 
/// @@ -1234,6 +1369,8 @@ impl Pallet { collect_events: CollectEvents, determinism: Determinism, ) -> ContractExecResult, EventRecordOf> { + ensure_no_migration_in_progress!(); + let mut debug_message = if matches!(debug, DebugInfo::UnsafeDebug) { Some(DebugBufferVec::::default()) } else { @@ -1283,18 +1420,59 @@ impl Pallet { origin: T::AccountId, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: Option>, + mut storage_deposit_limit: Option>, code: Code>, data: Vec, salt: Vec, debug: DebugInfo, collect_events: CollectEvents, ) -> ContractInstantiateResult, EventRecordOf> { + ensure_no_migration_in_progress!(); + let mut debug_message = if debug == DebugInfo::UnsafeDebug { Some(DebugBufferVec::::default()) } else { None }; + // collect events if CollectEvents is UnsafeCollect + let events = || { + if collect_events == CollectEvents::UnsafeCollect { + Some(System::::read_events_no_consensus().map(|e| *e).collect()) + } else { + None + } + }; + + let (code, upload_deposit): (WasmCode, BalanceOf) = match code { + Code::Upload(code) => { + let result = Self::try_upload_code( + origin.clone(), + code, + storage_deposit_limit.map(Into::into), + Determinism::Enforced, + debug_message.as_mut(), + ); + + let (module, deposit) = match result { + Ok(result) => result, + Err(error) => + return ContractResult { + gas_consumed: Zero::zero(), + gas_required: Zero::zero(), + storage_deposit: Default::default(), + debug_message: debug_message.unwrap_or(Default::default()).into(), + result: Err(error), + events: events(), + }, + }; + + storage_deposit_limit = + storage_deposit_limit.map(|l| l.saturating_sub(deposit.into())); + (WasmCode::Wasm(module), deposit) + }, + Code::Existing(hash) => (WasmCode::CodeHash(hash), Default::default()), + }; + let common = CommonInput { origin: Origin::from_account_id(origin), value, @@ -1303,13 +1481,8 @@ impl Pallet { storage_deposit_limit, debug_message: debug_message.as_mut(), }; + let output = InstantiateInput:: { code, salt 
}.run_guarded(common); - // collect events if CollectEvents is UnsafeCollect - let events = if collect_events == CollectEvents::UnsafeCollect { - Some(System::::read_events_no_consensus().map(|e| *e).collect()) - } else { - None - }; ContractInstantiateResult { result: output .result @@ -1317,9 +1490,11 @@ impl Pallet { .map_err(|e| e.error), gas_consumed: output.gas_meter.gas_consumed(), gas_required: output.gas_meter.gas_required(), - storage_deposit: output.storage_deposit, + storage_deposit: output + .storage_deposit + .saturating_add(&StorageDeposit::Charge(upload_deposit)), debug_message: debug_message.unwrap_or_default().to_vec(), - events, + events: events(), } } @@ -1333,26 +1508,39 @@ impl Pallet { storage_deposit_limit: Option>, determinism: Determinism, ) -> CodeUploadResult, BalanceOf> { + Migration::::ensure_migrated()?; + let (module, deposit) = + Self::try_upload_code(origin, code, storage_deposit_limit, determinism, None)?; + Ok(CodeUploadReturnValue { code_hash: *module.code_hash(), deposit }) + } + + /// Uploads new code and returns the Wasm blob and deposit amount collected. 
+ fn try_upload_code( + origin: T::AccountId, + code: Vec, + storage_deposit_limit: Option>, + determinism: Determinism, + mut debug_message: Option<&mut DebugBufferVec>, + ) -> Result<(WasmBlob, BalanceOf), DispatchError> { let schedule = T::Schedule::get(); - let module = PrefabWasmModule::from_code( - code, - &schedule, - origin, - determinism, - TryInstantiate::Instantiate, - ) - .map_err(|(err, _)| err)?; - let deposit = module.open_deposit(); + let mut module = + WasmBlob::from_code(code, &schedule, origin, determinism).map_err(|(err, msg)| { + debug_message.as_mut().map(|d| d.try_extend(msg.bytes())); + err + })?; + let deposit = module.store_code()?; if let Some(storage_deposit_limit) = storage_deposit_limit { ensure!(storage_deposit_limit >= deposit, >::StorageDepositLimitExhausted); } - let result = CodeUploadReturnValue { code_hash: *module.code_hash(), deposit }; - module.store()?; - Ok(result) + + Ok((module, deposit)) } /// Query storage of a specified contract under a specified key. pub fn get_storage(address: T::AccountId, key: Vec) -> GetStorageResult { + if Migration::::in_progress() { + return Err(ContractAccessError::MigrationInProgress) + } let contract_info = ContractInfoOf::::get(&address).ok_or(ContractAccessError::DoesntExist)?; @@ -1382,26 +1570,17 @@ impl Pallet { ContractInfo::::load_code_hash(account) } - /// Store code for benchmarks which does not check nor instrument the code. + /// Store code for benchmarks which does not validate the code. #[cfg(feature = "runtime-benchmarks")] fn store_code_raw( code: Vec, owner: T::AccountId, ) -> frame_support::dispatch::DispatchResult { let schedule = T::Schedule::get(); - PrefabWasmModule::store_code_unchecked(code, &schedule, owner)?; + WasmBlob::::from_code_unchecked(code, &schedule, owner)?.store_code()?; Ok(()) } - /// This exists so that benchmarks can determine the weight of running an instrumentation. 
- #[cfg(feature = "runtime-benchmarks")] - fn reinstrument_module( - module: &mut PrefabWasmModule, - schedule: &Schedule, - ) -> frame_support::dispatch::DispatchResult { - self::wasm::reinstrument(module, schedule).map(|_| ()) - } - /// Deposit a pallet contracts event. Handles the conversion to the overarching event type. fn deposit_event(topics: Vec, event: Event) { >::deposit_event_indexed( diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs index 96a4c3203474c..106b68dc4416e 100644 --- a/frame/contracts/src/migration.rs +++ b/frame/contracts/src/migration.rs @@ -15,459 +15,617 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{BalanceOf, CodeHash, Config, Pallet, TrieId, Weight}; -use codec::{Decode, Encode}; +//! Multi-block Migration framework for pallet-contracts. +//! +//! This module allows us to define a migration as a sequence of [`MigrationStep`]s that can be +//! executed across multiple blocks. +//! +//! # Usage +//! +//! A migration step is defined under `src/migration/vX.rs`, where `X` is the version number. +//! For example, `vX.rs` defines a migration from version `X - 1` to version `X`. +//! +//! ## Example: +//! +//! To configure a migration to `v11` for a runtime using `v10` of pallet-contracts on the chain, +//! you would set the `Migrations` type as follows: +//! +//! ``` +//! use pallet_contracts::migration::{v10, v11}; +//! # pub enum Runtime {}; +//! # struct Currency; +//! type Migrations = (v10::Migration, v11::Migration); +//! ``` +//! +//! ## Notes: +//! +//! - Migrations should always be tested with `try-runtime` before being deployed. +//! - By testing with `try-runtime` against a live network, you ensure that all migration steps work +//! and that you have included the required steps. +//! +//! ## Low Level / Implementation Details +//! +//! 
When a migration starts and [`OnRuntimeUpgrade::on_runtime_upgrade`] is called, instead of +//! performing the actual migration, we set a custom storage item [`MigrationInProgress`]. +//! This storage item defines a [`Cursor`] for the current migration. +//! +//! If the [`MigrationInProgress`] storage item exists, it means a migration is in progress, and its +//! value holds a cursor for the current migration step. These migration steps are executed during +//! [`Hooks::on_idle`] or when the [`Pallet::migrate`] dispatchable is +//! called. +//! +//! While the migration is in progress, all dispatchables except `migrate`, are blocked, and returns +//! a `MigrationInProgress` error. + +pub mod v09; +pub mod v10; +pub mod v11; +pub mod v12; +pub mod v13; +pub mod v14; +include!(concat!(env!("OUT_DIR"), "/migration_codegen.rs")); + +use crate::{weights::WeightInfo, Config, Error, MigrationInProgress, Pallet, Weight, LOG_TARGET}; +use codec::{Codec, Decode}; use frame_support::{ codec, pallet_prelude::*, - storage::migration, - storage_alias, - traits::{Get, OnRuntimeUpgrade}, - Identity, Twox64Concat, + traits::{ConstU32, OnRuntimeUpgrade}, }; -use sp_runtime::traits::Saturating; -use sp_std::{marker::PhantomData, prelude::*}; +use sp_runtime::Saturating; +use sp_std::marker::PhantomData; -/// Performs all necessary migrations based on `StorageVersion`. 
-pub struct Migration(PhantomData); -impl OnRuntimeUpgrade for Migration { - fn on_runtime_upgrade() -> Weight { - let version = >::on_chain_storage_version(); - let mut weight = Weight::zero(); +#[cfg(feature = "try-runtime")] +use sp_std::prelude::*; - if version < 4 { - v4::migrate::(&mut weight); - } +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; - if version < 5 { - v5::migrate::(&mut weight); - } +const PROOF_ENCODE: &str = "Tuple::max_encoded_len() < Cursor::max_encoded_len()` is verified in `Self::integrity_test()`; qed"; +const PROOF_DECODE: &str = + "We encode to the same type in this trait only. No other code touches this item; qed"; - if version < 6 { - v6::migrate::(&mut weight); - } +fn invalid_version(version: StorageVersion) -> ! { + panic!("Required migration {version:?} not supported by this runtime. This is a bug."); +} - if version < 7 { - v7::migrate::(&mut weight); - } +/// The cursor used to encode the position (usually the last iterated key) of the current migration +/// step. +pub type Cursor = BoundedVec>; - if version < 8 { - v8::migrate::(&mut weight); - } +/// IsFinished describes whether a migration is finished or not. +pub enum IsFinished { + Yes, + No, +} - if version < 9 { - v9::migrate::(&mut weight); +/// A trait that allows to migrate storage from one version to another. +/// +/// The migration is done in steps. The migration is finished when +/// `step()` returns `IsFinished::Yes`. +pub trait MigrationStep: Codec + MaxEncodedLen + Default { + /// Returns the version of the migration. + const VERSION: u16; + + /// Returns the maximum weight that can be consumed in a single step. + fn max_step_weight() -> Weight; + + /// Process one step of the migration. + /// + /// Returns whether the migration is finished and the weight consumed. + fn step(&mut self) -> (IsFinished, Weight); + + /// Verify that the migration step fits into `Cursor`, and that `max_step_weight` is not greater + /// than `max_block_weight`. 
+ fn integrity_test(max_block_weight: Weight) { + if Self::max_step_weight().any_gt(max_block_weight) { + panic!( + "Invalid max_step_weight for Migration {}. Value should be lower than {}", + Self::VERSION, + max_block_weight + ); } - StorageVersion::new(9).put::>(); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - - weight + let len = ::max_encoded_len(); + let max = Cursor::bound(); + if len > max { + panic!( + "Migration {} has size {} which is bigger than the maximum of {}", + Self::VERSION, + len, + max, + ); + } } + /// Execute some pre-checks prior to running the first step of this migration. #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - let version = >::on_chain_storage_version(); - - if version == 7 { - v8::pre_upgrade::()?; - } - - Ok(version.encode()) + fn pre_upgrade_step() -> Result, TryRuntimeError> { + Ok(Vec::new()) } + /// Execute some post-checks after running the last step of this migration. #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - let version = Decode::decode(&mut state.as_ref()).map_err(|_| "Cannot decode version")?; - post_checks::post_upgrade::(version) + fn post_upgrade_step(_state: Vec) -> Result<(), TryRuntimeError> { + Ok(()) } } -/// V4: `Schedule` is changed to be a config item rather than an in-storage value. -mod v4 { - use super::*; +/// A noop migration that can be used when there is no migration to be done for a given version. 
+#[doc(hidden)] +#[derive(frame_support::DefaultNoBound, Encode, Decode, MaxEncodedLen)] +pub struct NoopMigration; - pub fn migrate(weight: &mut Weight) { - #[allow(deprecated)] - migration::remove_storage_prefix(>::name().as_bytes(), b"CurrentSchedule", b""); - weight.saturating_accrue(T::DbWeight::get().writes(1)); +impl MigrationStep for NoopMigration { + const VERSION: u16 = N; + fn max_step_weight() -> Weight { + Weight::zero() + } + fn step(&mut self) -> (IsFinished, Weight) { + log::debug!(target: LOG_TARGET, "Noop migration for version {}", N); + (IsFinished::Yes, Weight::zero()) } } -/// V5: State rent is removed which obsoletes some fields in `ContractInfo`. -mod v5 { - use super::*; +mod private { + use crate::migration::MigrationStep; + pub trait Sealed {} + #[impl_trait_for_tuples::impl_for_tuples(10)] + #[tuple_types_custom_trait_bound(MigrationStep)] + impl Sealed for Tuple {} +} - type AliveContractInfo = - RawAliveContractInfo, BalanceOf, ::BlockNumber>; - type TombstoneContractInfo = RawTombstoneContractInfo< - ::Hash, - ::Hashing, - >; - - #[derive(Decode)] - enum OldContractInfo { - Alive(AliveContractInfo), - Tombstone(TombstoneContractInfo), - } +/// Defines a sequence of migrations. +/// +/// The sequence must be defined by a tuple of migrations, each of which must implement the +/// `MigrationStep` trait. Migrations must be ordered by their versions with no gaps. +pub trait MigrateSequence: private::Sealed { + /// Returns the range of versions that this migrations sequence can handle. + /// Migrations must be ordered by their versions with no gaps. 
+ /// + /// The following code will fail to compile: + /// + /// ```compile_fail + /// # use pallet_contracts::{NoopMigration, MigrateSequence}; + /// let _ = <(NoopMigration<1>, NoopMigration<3>)>::VERSION_RANGE; + /// ``` + /// The following code will compile: + /// ``` + /// # use pallet_contracts::{NoopMigration, MigrateSequence}; + /// let _ = <(NoopMigration<1>, NoopMigration<2>)>::VERSION_RANGE; + /// ``` + const VERSION_RANGE: (u16, u16); + + /// Returns the default cursor for the given version. + fn new(version: StorageVersion) -> Cursor; - #[derive(Decode)] - struct RawAliveContractInfo { - trie_id: TrieId, - _storage_size: u32, - _pair_count: u32, - code_hash: CodeHash, - _rent_allowance: Balance, - _rent_paid: Balance, - _deduct_block: BlockNumber, - _last_write: Option, - _reserved: Option<()>, + #[cfg(feature = "try-runtime")] + fn pre_upgrade_step(_version: StorageVersion) -> Result, TryRuntimeError> { + Ok(Vec::new()) } - #[derive(Decode)] - struct RawTombstoneContractInfo(H, PhantomData); - - #[derive(Decode)] - struct OldDeletedContract { - _pair_count: u32, - trie_id: TrieId, + #[cfg(feature = "try-runtime")] + fn post_upgrade_step(_version: StorageVersion, _state: Vec) -> Result<(), TryRuntimeError> { + Ok(()) } - pub type ContractInfo = RawContractInfo>; + /// Execute the migration step until the weight limit is reached. + fn steps(version: StorageVersion, cursor: &[u8], weight_left: &mut Weight) -> StepResult; - #[derive(Encode, Decode)] - pub struct RawContractInfo { - pub trie_id: TrieId, - pub code_hash: CodeHash, - pub _reserved: Option<()>, - } + /// Verify that the migration step fits into `Cursor`, and that `max_step_weight` is not greater + /// than `max_block_weight`. + fn integrity_test(max_block_weight: Weight); - #[derive(Encode, Decode)] - struct DeletedContract { - trie_id: TrieId, + /// Returns whether migrating from `in_storage` to `target` is supported. 
+ /// + /// A migration is supported if `VERSION_RANGE` is (in_storage + 1, target). + fn is_upgrade_supported(in_storage: StorageVersion, target: StorageVersion) -> bool { + let (low, high) = Self::VERSION_RANGE; + target == high && in_storage + 1 == low } +} - #[storage_alias] - type ContractInfoOf = StorageMap< - Pallet, - Twox64Concat, - ::AccountId, - ContractInfo, - >; - - #[storage_alias] - type DeletionQueue = StorageValue, Vec>; - - pub fn migrate(weight: &mut Weight) { - >::translate(|_key, old: OldContractInfo| { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - match old { - OldContractInfo::Alive(old) => Some(ContractInfo:: { - trie_id: old.trie_id, - code_hash: old.code_hash, - _reserved: old._reserved, - }), - OldContractInfo::Tombstone(_) => None, +/// Performs all necessary migrations based on `StorageVersion`. +/// +/// If `TEST_ALL_STEPS == true` and `try-runtime` is enabled, this will run all the migrations +/// inside `on_runtime_upgrade`. This should be set to false in tests that want to ensure the step +/// by step migration works. 
+pub struct Migration(PhantomData); + +#[cfg(feature = "try-runtime")] +impl Migration { + fn run_all_steps() -> Result<(), TryRuntimeError> { + let mut weight = Weight::zero(); + let name = >::name(); + loop { + let in_progress_version = >::on_chain_storage_version() + 1; + let state = T::Migrations::pre_upgrade_step(in_progress_version)?; + let (status, w) = Self::migrate(Weight::MAX); + weight.saturating_accrue(w); + log::info!( + target: LOG_TARGET, + "{name}: Migration step {:?} weight = {}", + in_progress_version, + weight + ); + T::Migrations::post_upgrade_step(in_progress_version, state)?; + if matches!(status, MigrateResult::Completed) { + break } - }); + } - DeletionQueue::::translate(|old: Option>| { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - old.map(|old| old.into_iter().map(|o| DeletedContract { trie_id: o.trie_id }).collect()) - }) - .ok(); + let name = >::name(); + log::info!(target: LOG_TARGET, "{name}: Migration steps weight = {}", weight); + Ok(()) } } -/// V6: Added storage deposits -mod v6 { - use super::*; +impl OnRuntimeUpgrade for Migration { + fn on_runtime_upgrade() -> Weight { + let name = >::name(); + let latest_version = >::current_storage_version(); + let storage_version = >::on_chain_storage_version(); + + if storage_version == latest_version { + log::warn!( + target: LOG_TARGET, + "{name}: No Migration performed storage_version = latest_version = {:?}", + &storage_version + ); + return T::WeightInfo::on_runtime_upgrade_noop() + } - #[derive(Encode, Decode)] - struct OldPrefabWasmModule { - #[codec(compact)] - instruction_weights_version: u32, - #[codec(compact)] - initial: u32, - #[codec(compact)] - maximum: u32, - #[codec(compact)] - refcount: u64, - _reserved: Option<()>, - code: Vec, - original_code_len: u32, - } + // In case a migration is already in progress we create the next migration + // (if any) right when the current one finishes. 
+ if Self::in_progress() { + log::warn!( + target: LOG_TARGET, + "{name}: Migration already in progress {:?}", + &storage_version + ); - #[derive(Encode, Decode)] - pub struct PrefabWasmModule { - #[codec(compact)] - pub instruction_weights_version: u32, - #[codec(compact)] - pub initial: u32, - #[codec(compact)] - pub maximum: u32, - pub code: Vec, - } + return T::WeightInfo::on_runtime_upgrade_in_progress() + } - use v5::ContractInfo as OldContractInfo; + log::info!( + target: LOG_TARGET, + "{name}: Upgrading storage from {storage_version:?} to {latest_version:?}.", + ); - #[derive(Encode, Decode)] - pub struct RawContractInfo { - pub trie_id: TrieId, - pub code_hash: CodeHash, - pub storage_deposit: Balance, - } + let cursor = T::Migrations::new(storage_version + 1); + MigrationInProgress::::set(Some(cursor)); - #[derive(Encode, Decode)] - pub struct OwnerInfo { - owner: T::AccountId, - #[codec(compact)] - deposit: BalanceOf, - #[codec(compact)] - refcount: u64, - } + #[cfg(feature = "try-runtime")] + if TEST_ALL_STEPS { + Self::run_all_steps().unwrap(); + } - pub type ContractInfo = RawContractInfo, BalanceOf>; - - #[storage_alias] - type ContractInfoOf = StorageMap< - Pallet, - Twox64Concat, - ::AccountId, - ContractInfo, - >; - - #[storage_alias] - type CodeStorage = StorageMap, Identity, CodeHash, PrefabWasmModule>; - - #[storage_alias] - type OwnerInfoOf = StorageMap, Identity, CodeHash, OwnerInfo>; - - pub fn migrate(weight: &mut Weight) { - >::translate(|_key, old: OldContractInfo| { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - Some(ContractInfo:: { - trie_id: old.trie_id, - code_hash: old.code_hash, - storage_deposit: Default::default(), - }) - }); + T::WeightInfo::on_runtime_upgrade() + } - let nobody = T::AccountId::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()) - .expect("Infinite input; no dead input space; qed"); - - >::translate(|key, old: OldPrefabWasmModule| { - 
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - >::insert( - key, - OwnerInfo { - refcount: old.refcount, - owner: nobody.clone(), - deposit: Default::default(), - }, - ); - Some(PrefabWasmModule { - instruction_weights_version: old.instruction_weights_version, - initial: old.initial, - maximum: old.maximum, - code: old.code, - }) - }); + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + // We can't really do much here as our migrations do not happen during the runtime upgrade. + // Instead, we call the migrations `pre_upgrade` and `post_upgrade` hooks when we iterate + // over our migrations. + let storage_version = >::on_chain_storage_version(); + let target_version = >::current_storage_version(); + + ensure!( + storage_version != target_version, + "No upgrade: Please remove this migration from your runtime upgrade configuration." + ); + + log::debug!( + target: LOG_TARGET, + "Requested migration of {} from {:?}(on-chain storage version) to {:?}(current storage version)", + >::name(), storage_version, target_version + ); + + ensure!( + T::Migrations::is_upgrade_supported(storage_version, target_version), + "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, current storage version)" + ); + Ok(Default::default()) } } -/// Rename `AccountCounter` to `Nonce`. -mod v7 { - use super::*; - - pub fn migrate(weight: &mut Weight) { - #[storage_alias] - type AccountCounter = StorageValue, u64, ValueQuery>; - #[storage_alias] - type Nonce = StorageValue, u64, ValueQuery>; +/// The result of running the migration. 
+#[derive(Debug, PartialEq)] +pub enum MigrateResult { + /// No migration was performed + NoMigrationPerformed, + /// No migration currently in progress + NoMigrationInProgress, + /// A migration is in progress + InProgress { steps_done: u32 }, + /// All migrations are completed + Completed, +} - Nonce::::set(AccountCounter::::take()); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)) - } +/// The result of running a migration step. +#[derive(Debug, PartialEq)] +pub enum StepResult { + InProgress { cursor: Cursor, steps_done: u32 }, + Completed { steps_done: u32 }, } -/// Update `ContractInfo` with new fields that track storage deposits. -mod v8 { - use super::*; - use sp_io::default_child_storage as child; - use v6::ContractInfo as OldContractInfo; - - #[derive(Encode, Decode)] - pub struct ContractInfo { - pub trie_id: TrieId, - pub code_hash: CodeHash, - pub storage_bytes: u32, - pub storage_items: u32, - pub storage_byte_deposit: BalanceOf, - pub storage_item_deposit: BalanceOf, - pub storage_base_deposit: BalanceOf, +impl Migration { + /// Verify that each migration's step of the [`Config::Migrations`] sequence fits into + /// `Cursor`. 
+ pub(crate) fn integrity_test() { + let max_weight = ::BlockWeights::get().max_block; + T::Migrations::integrity_test(max_weight) } - #[storage_alias] - type ContractInfoOf = - StorageMap, Twox64Concat, ::AccountId, V>; - - pub fn migrate(weight: &mut Weight) { - >>::translate_values(|old: OldContractInfo| { - // Count storage items of this contract - let mut storage_bytes = 0u32; - let mut storage_items = 0u32; - let mut key = Vec::new(); - while let Some(next) = child::next_key(&old.trie_id, &key) { - key = next; - let mut val_out = []; - let len = child::read(&old.trie_id, &key, &mut val_out, 0) - .expect("The loop conditions checks for existence of the key; qed"); - storage_bytes.saturating_accrue(len); - storage_items.saturating_accrue(1); - } + /// Migrate + /// Return the weight used and whether or not a migration is in progress + pub(crate) fn migrate(weight_limit: Weight) -> (MigrateResult, Weight) { + let name = >::name(); + let mut weight_left = weight_limit; + + if weight_left.checked_reduce(T::WeightInfo::migrate()).is_none() { + return (MigrateResult::NoMigrationPerformed, Weight::zero()) + } - let storage_byte_deposit = - T::DepositPerByte::get().saturating_mul(storage_bytes.into()); - let storage_item_deposit = - T::DepositPerItem::get().saturating_mul(storage_items.into()); - let storage_base_deposit = old - .storage_deposit - .saturating_sub(storage_byte_deposit) - .saturating_sub(storage_item_deposit); - - // Reads: One read for each storage item plus the contract info itself. - // Writes: Only the new contract info. 
- weight.saturating_accrue( - T::DbWeight::get().reads_writes(u64::from(storage_items) + 1, 1), + MigrationInProgress::::mutate_exists(|progress| { + let Some(cursor_before) = progress.as_mut() else { + return (MigrateResult::NoMigrationInProgress, T::WeightInfo::migration_noop()) + }; + + // if a migration is running it is always upgrading to the next version + let storage_version = >::on_chain_storage_version(); + let in_progress_version = storage_version + 1; + + log::info!( + target: LOG_TARGET, + "{name}: Migrating from {:?} to {:?},", + storage_version, + in_progress_version, ); - Some(ContractInfo { - trie_id: old.trie_id, - code_hash: old.code_hash, - storage_bytes, - storage_items, - storage_byte_deposit, - storage_item_deposit, - storage_base_deposit, - }) - }); + let result = match T::Migrations::steps( + in_progress_version, + cursor_before.as_ref(), + &mut weight_left, + ) { + StepResult::InProgress { cursor, steps_done } => { + *progress = Some(cursor); + MigrateResult::InProgress { steps_done } + }, + StepResult::Completed { steps_done } => { + in_progress_version.put::>(); + if >::current_storage_version() != in_progress_version { + log::info!( + target: LOG_TARGET, + "{name}: Next migration is {:?},", + in_progress_version + 1 + ); + *progress = Some(T::Migrations::new(in_progress_version + 1)); + MigrateResult::InProgress { steps_done } + } else { + log::info!( + target: LOG_TARGET, + "{name}: All migrations done. 
At version {:?},", + in_progress_version + ); + *progress = None; + MigrateResult::Completed + } + }, + }; + + (result, weight_limit.saturating_sub(weight_left)) + }) } - #[cfg(feature = "try-runtime")] - pub fn pre_upgrade() -> Result<(), &'static str> { - use frame_support::traits::ReservableCurrency; - for (key, value) in ContractInfoOf::>::iter() { - let reserved = T::Currency::reserved_balance(&key); - ensure!(reserved >= value.storage_deposit, "Reserved balance out of sync."); + pub(crate) fn ensure_migrated() -> DispatchResult { + if Self::in_progress() { + Err(Error::::MigrationInProgress.into()) + } else { + Ok(()) } - Ok(()) + } + + pub(crate) fn in_progress() -> bool { + MigrationInProgress::::exists() } } -/// Update `CodeStorage` with the new `determinism` field. -mod v9 { - use super::*; - use crate::Determinism; - use v6::PrefabWasmModule as OldPrefabWasmModule; - - #[derive(Encode, Decode)] - pub struct PrefabWasmModule { - #[codec(compact)] - pub instruction_weights_version: u32, - #[codec(compact)] - pub initial: u32, - #[codec(compact)] - pub maximum: u32, - pub code: Vec, - pub determinism: Determinism, +#[impl_trait_for_tuples::impl_for_tuples(10)] +#[tuple_types_custom_trait_bound(MigrationStep)] +impl MigrateSequence for Tuple { + const VERSION_RANGE: (u16, u16) = { + let mut versions: (u16, u16) = (0, 0); + for_tuples!( + #( + match versions { + (0, 0) => { + versions = (Tuple::VERSION, Tuple::VERSION); + }, + (min_version, last_version) if Tuple::VERSION == last_version + 1 => { + versions = (min_version, Tuple::VERSION); + }, + _ => panic!("Migrations must be ordered by their versions with no gaps.") + } + )* + ); + versions + }; + + fn new(version: StorageVersion) -> Cursor { + for_tuples!( + #( + if version == Tuple::VERSION { + return Tuple::default().encode().try_into().expect(PROOF_ENCODE) + } + )* + ); + invalid_version(version) } - #[storage_alias] - type CodeStorage = StorageMap, Identity, CodeHash, PrefabWasmModule>; - - pub fn 
migrate(weight: &mut Weight) { - >::translate_values(|old: OldPrefabWasmModule| { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - Some(PrefabWasmModule { - instruction_weights_version: old.instruction_weights_version, - initial: old.initial, - maximum: old.maximum, - code: old.code, - determinism: Determinism::Enforced, - }) - }); + #[cfg(feature = "try-runtime")] + /// Execute the pre-checks of the step associated with this version. + fn pre_upgrade_step(version: StorageVersion) -> Result, TryRuntimeError> { + for_tuples!( + #( + if version == Tuple::VERSION { + return Tuple::pre_upgrade_step() + } + )* + ); + invalid_version(version) } -} -// Post checks always need to be run against the latest storage version. This is why we -// do not scope them in the per version modules. They always need to be ported to the latest -// version. -#[cfg(feature = "try-runtime")] -mod post_checks { - use super::*; - use crate::Determinism; - use sp_io::default_child_storage as child; - use v8::ContractInfo; - use v9::PrefabWasmModule; + #[cfg(feature = "try-runtime")] + /// Execute the post-checks of the step associated with this version. 
+ fn post_upgrade_step(version: StorageVersion, state: Vec) -> Result<(), TryRuntimeError> { + for_tuples!( + #( + if version == Tuple::VERSION { + return Tuple::post_upgrade_step(state) + } + )* + ); + invalid_version(version) + } - #[storage_alias] - type CodeStorage = StorageMap, Identity, CodeHash, PrefabWasmModule>; + fn steps(version: StorageVersion, mut cursor: &[u8], weight_left: &mut Weight) -> StepResult { + for_tuples!( + #( + if version == Tuple::VERSION { + let mut migration = ::decode(&mut cursor) + .expect(PROOF_DECODE); + let max_weight = Tuple::max_step_weight(); + let mut steps_done = 0; + while weight_left.all_gt(max_weight) { + let (finished, weight) = migration.step(); + steps_done.saturating_accrue(1); + weight_left.saturating_reduce(weight); + if matches!(finished, IsFinished::Yes) { + return StepResult::Completed{ steps_done } + } + } + return StepResult::InProgress{cursor: migration.encode().try_into().expect(PROOF_ENCODE), steps_done } + } + )* + ); + invalid_version(version) + } - #[storage_alias] - type ContractInfoOf = - StorageMap, Twox64Concat, ::AccountId, V>; + fn integrity_test(max_block_weight: Weight) { + for_tuples!( + #( + Tuple::integrity_test(max_block_weight); + )* + ); + } +} - pub fn post_upgrade(old_version: StorageVersion) -> Result<(), &'static str> { - if old_version < 7 { - return Ok(()) - } +#[cfg(test)] +mod test { + use super::*; + use crate::{ + migration::codegen::LATEST_MIGRATION_VERSION, + tests::{ExtBuilder, Test}, + }; + + #[derive(Default, Encode, Decode, MaxEncodedLen)] + struct MockMigration { + // MockMigration needs `N` steps to finish + count: u16, + } - if old_version < 8 { - v8::()?; + impl MigrationStep for MockMigration { + const VERSION: u16 = N; + fn max_step_weight() -> Weight { + Weight::from_all(1) } - - if old_version < 9 { - v9::()?; + fn step(&mut self) -> (IsFinished, Weight) { + assert!(self.count != N); + self.count += 1; + if self.count == N { + (IsFinished::Yes, Weight::from_all(1)) + } 
else { + (IsFinished::No, Weight::from_all(1)) + } } + } - Ok(()) + #[test] + fn test_storage_version_matches_last_migration_file() { + assert_eq!(StorageVersion::new(LATEST_MIGRATION_VERSION), crate::pallet::STORAGE_VERSION); } - fn v8() -> Result<(), &'static str> { - use frame_support::traits::ReservableCurrency; - for (key, value) in ContractInfoOf::>::iter() { - let reserved = T::Currency::reserved_balance(&key); - let stored = value - .storage_base_deposit - .saturating_add(value.storage_byte_deposit) - .saturating_add(value.storage_item_deposit); - ensure!(reserved >= stored, "Reserved balance out of sync."); - - let mut storage_bytes = 0u32; - let mut storage_items = 0u32; - let mut key = Vec::new(); - while let Some(next) = child::next_key(&value.trie_id, &key) { - key = next; - let mut val_out = []; - let len = child::read(&value.trie_id, &key, &mut val_out, 0) - .expect("The loop conditions checks for existence of the key; qed"); - storage_bytes.saturating_accrue(len); - storage_items.saturating_accrue(1); - } - ensure!(storage_bytes == value.storage_bytes, "Storage bytes do not match.",); - ensure!(storage_items == value.storage_items, "Storage items do not match.",); - } - Ok(()) + #[test] + fn version_range_works() { + let range = <(MockMigration<1>, MockMigration<2>)>::VERSION_RANGE; + assert_eq!(range, (1, 2)); } - fn v9() -> Result<(), &'static str> { - for value in CodeStorage::::iter_values() { - ensure!( - value.determinism == Determinism::Enforced, - "All pre-existing codes need to be deterministic." 
- ); - } - Ok(()) + #[test] + fn is_upgrade_supported_works() { + type Migrations = (MockMigration<9>, MockMigration<10>, MockMigration<11>); + assert!(Migrations::is_upgrade_supported(StorageVersion::new(8), StorageVersion::new(11))); + assert!(!Migrations::is_upgrade_supported(StorageVersion::new(9), StorageVersion::new(11))); + assert!(!Migrations::is_upgrade_supported(StorageVersion::new(8), StorageVersion::new(12))); + } + + #[test] + fn steps_works() { + type Migrations = (MockMigration<2>, MockMigration<3>); + let version = StorageVersion::new(2); + let mut cursor = Migrations::new(version); + + let mut weight = Weight::from_all(2); + let result = Migrations::steps(version, &cursor, &mut weight); + cursor = vec![1u8, 0].try_into().unwrap(); + assert_eq!(result, StepResult::InProgress { cursor: cursor.clone(), steps_done: 1 }); + assert_eq!(weight, Weight::from_all(1)); + + let mut weight = Weight::from_all(2); + assert_eq!( + Migrations::steps(version, &cursor, &mut weight), + StepResult::Completed { steps_done: 1 } + ); + } + + #[test] + fn no_migration_in_progress_works() { + type TestMigration = Migration; + + ExtBuilder::default().build().execute_with(|| { + assert_eq!(StorageVersion::get::>(), LATEST_MIGRATION_VERSION); + assert_eq!(TestMigration::migrate(Weight::MAX).0, MigrateResult::NoMigrationInProgress) + }); + } + + #[test] + fn migration_works() { + type TestMigration = Migration; + + ExtBuilder::default() + .set_storage_version(LATEST_MIGRATION_VERSION - 2) + .build() + .execute_with(|| { + assert_eq!(StorageVersion::get::>(), LATEST_MIGRATION_VERSION - 2); + TestMigration::on_runtime_upgrade(); + for (version, status) in [ + (LATEST_MIGRATION_VERSION - 1, MigrateResult::InProgress { steps_done: 1 }), + (LATEST_MIGRATION_VERSION, MigrateResult::Completed), + ] { + assert_eq!(TestMigration::migrate(Weight::MAX).0, status); + assert_eq!( + >::on_chain_storage_version(), + StorageVersion::new(version) + ); + } + + assert_eq!( + 
TestMigration::migrate(Weight::MAX).0, + MigrateResult::NoMigrationInProgress + ); + assert_eq!(StorageVersion::get::>(), LATEST_MIGRATION_VERSION); + }); } } diff --git a/frame/contracts/src/migration/v09.rs b/frame/contracts/src/migration/v09.rs new file mode 100644 index 0000000000000..e6c6642955642 --- /dev/null +++ b/frame/contracts/src/migration/v09.rs @@ -0,0 +1,144 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Update `CodeStorage` with the new `determinism` field. 
+ +use crate::{ + migration::{IsFinished, MigrationStep}, + weights::WeightInfo, + CodeHash, Config, Determinism, Pallet, Weight, LOG_TARGET, +}; +use codec::{Decode, Encode}; +use frame_support::{codec, pallet_prelude::*, storage_alias, DefaultNoBound, Identity}; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; +use sp_std::prelude::*; + +mod old { + use super::*; + + #[derive(Encode, Decode)] + pub struct PrefabWasmModule { + #[codec(compact)] + pub instruction_weights_version: u32, + #[codec(compact)] + pub initial: u32, + #[codec(compact)] + pub maximum: u32, + pub code: Vec, + } + + #[storage_alias] + pub type CodeStorage = + StorageMap, Identity, CodeHash, PrefabWasmModule>; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn store_old_dummy_code(len: usize) { + use sp_runtime::traits::Hash; + let module = old::PrefabWasmModule { + instruction_weights_version: 0, + initial: 0, + maximum: 0, + code: vec![42u8; len], + }; + let hash = T::Hashing::hash(&module.code); + old::CodeStorage::::insert(hash, module); +} + +#[derive(Encode, Decode)] +struct PrefabWasmModule { + #[codec(compact)] + pub instruction_weights_version: u32, + #[codec(compact)] + pub initial: u32, + #[codec(compact)] + pub maximum: u32, + pub code: Vec, + pub determinism: Determinism, +} + +#[storage_alias] +type CodeStorage = StorageMap, Identity, CodeHash, PrefabWasmModule>; + +#[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] +pub struct Migration { + last_code_hash: Option>, +} + +impl MigrationStep for Migration { + const VERSION: u16 = 9; + + fn max_step_weight() -> Weight { + T::WeightInfo::v9_migration_step(T::MaxCodeLen::get()) + } + + fn step(&mut self) -> (IsFinished, Weight) { + let mut iter = if let Some(last_key) = self.last_code_hash.take() { + old::CodeStorage::::iter_from(old::CodeStorage::::hashed_key_for(last_key)) + } else { + old::CodeStorage::::iter() + }; + + if let Some((key, old)) = iter.next() { + log::debug!(target: LOG_TARGET, "Migrating 
contract code {:?}", key); + let len = old.code.len() as u32; + let module = PrefabWasmModule { + instruction_weights_version: old.instruction_weights_version, + initial: old.initial, + maximum: old.maximum, + code: old.code, + determinism: Determinism::Enforced, + }; + CodeStorage::::insert(key, module); + self.last_code_hash = Some(key); + (IsFinished::No, T::WeightInfo::v9_migration_step(len)) + } else { + log::debug!(target: LOG_TARGET, "No more contracts code to migrate"); + (IsFinished::Yes, T::WeightInfo::v9_migration_step(0)) + } + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade_step() -> Result, TryRuntimeError> { + let sample: Vec<_> = old::CodeStorage::::iter().take(100).collect(); + + log::debug!(target: LOG_TARGET, "Taking sample of {} contract codes", sample.len()); + Ok(sample.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { + let sample = , old::PrefabWasmModule)> as Decode>::decode(&mut &state[..]) + .expect("pre_upgrade_step provides a valid state; qed"); + + log::debug!(target: LOG_TARGET, "Validating sample of {} contract codes", sample.len()); + for (code_hash, old) in sample { + let module = CodeStorage::::get(&code_hash).unwrap(); + ensure!( + module.instruction_weights_version == old.instruction_weights_version, + "invalid isntruction weights version" + ); + ensure!(module.determinism == Determinism::Enforced, "invalid determinism"); + ensure!(module.initial == old.initial, "invalid initial"); + ensure!(module.maximum == old.maximum, "invalid maximum"); + ensure!(module.code == old.code, "invalid code"); + } + + Ok(()) + } +} diff --git a/frame/contracts/src/migration/v10.rs b/frame/contracts/src/migration/v10.rs new file mode 100644 index 0000000000000..3fc7cabe94942 --- /dev/null +++ b/frame/contracts/src/migration/v10.rs @@ -0,0 +1,304 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Don't rely on reserved balances keeping an account alive +//! See . + +use crate::{ + address::AddressGenerator, + exec::AccountIdOf, + migration::{IsFinished, MigrationStep}, + weights::WeightInfo, + CodeHash, Config, Pallet, TrieId, Weight, LOG_TARGET, +}; +use codec::{Decode, Encode}; +use core::cmp::{max, min}; +use frame_support::{ + codec, + pallet_prelude::*, + storage_alias, + traits::{ + tokens::{fungible::Inspect, Fortitude::Polite, Preservation::Preserve}, + ExistenceRequirement, ReservableCurrency, + }, + DefaultNoBound, +}; +use sp_core::hexdisplay::HexDisplay; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; +use sp_runtime::{traits::Zero, Perbill, Saturating}; +use sp_std::{ops::Deref, prelude::*}; + +mod old { + use super::*; + + pub type BalanceOf = ::AccountId, + >>::Balance; + + #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] + #[scale_info(skip_type_params(T, OldCurrency))] + pub struct ContractInfo + where + OldCurrency: ReservableCurrency<::AccountId>, + { + pub trie_id: TrieId, + pub code_hash: CodeHash, + pub storage_bytes: u32, + pub storage_items: u32, + pub storage_byte_deposit: BalanceOf, + pub storage_item_deposit: BalanceOf, + pub storage_base_deposit: BalanceOf, + } + + #[storage_alias] + pub type ContractInfoOf = StorageMap< + Pallet, + Twox64Concat, + ::AccountId, + 
ContractInfo, + >; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn store_old_contract_info( + account: T::AccountId, + info: crate::ContractInfo, +) where + OldCurrency: ReservableCurrency<::AccountId> + 'static, +{ + let info = old::ContractInfo { + trie_id: info.trie_id, + code_hash: info.code_hash, + storage_bytes: Default::default(), + storage_items: Default::default(), + storage_byte_deposit: Default::default(), + storage_item_deposit: Default::default(), + storage_base_deposit: Default::default(), + }; + old::ContractInfoOf::::insert(account, info); +} + +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen)] +#[scale_info(skip_type_params(T))] +pub struct DepositAccount(AccountIdOf); + +impl Deref for DepositAccount { + type Target = AccountIdOf; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[scale_info(skip_type_params(T, OldCurrency))] +pub struct ContractInfo +where + OldCurrency: ReservableCurrency<::AccountId>, +{ + pub trie_id: TrieId, + deposit_account: DepositAccount, + pub code_hash: CodeHash, + storage_bytes: u32, + storage_items: u32, + pub storage_byte_deposit: old::BalanceOf, + storage_item_deposit: old::BalanceOf, + storage_base_deposit: old::BalanceOf, +} + +#[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] +pub struct Migration { + last_account: Option, + _phantom: PhantomData<(T, OldCurrency)>, +} + +#[storage_alias] +type ContractInfoOf = StorageMap< + Pallet, + Twox64Concat, + ::AccountId, + ContractInfo, +>; + +impl MigrationStep for Migration +where + OldCurrency: ReservableCurrency<::AccountId> + + Inspect<::AccountId, Balance = old::BalanceOf>, +{ + const VERSION: u16 = 10; + + fn max_step_weight() -> Weight { + T::WeightInfo::v10_migration_step() + } + + fn step(&mut self) -> (IsFinished, Weight) { + let mut iter = if let Some(last_account) = self.last_account.take() { + 
old::ContractInfoOf::::iter_from( + old::ContractInfoOf::::hashed_key_for(last_account), + ) + } else { + old::ContractInfoOf::::iter() + }; + + if let Some((account, contract)) = iter.next() { + let min_balance = ::AccountId, + >>::minimum_balance(); + log::debug!(target: LOG_TARGET, "Account: 0x{} ", HexDisplay::from(&account.encode())); + + // Get the new deposit account address + let deposit_account: DepositAccount = + DepositAccount(T::AddressGenerator::deposit_address(&account)); + + // Calculate the existing deposit, that should be reserved on the contract account + let old_deposit = contract + .storage_base_deposit + .saturating_add(contract.storage_item_deposit) + .saturating_add(contract.storage_byte_deposit); + + // Unreserve the existing deposit + // Note we can't use repatriate_reserve, because it only works with existing accounts + let remaining = OldCurrency::unreserve(&account, old_deposit); + if !remaining.is_zero() { + log::warn!( + target: LOG_TARGET, + "Partially unreserved. Remaining {:?} out of {:?} asked", + remaining, + old_deposit + ); + } + + // Attempt to transfer the old deposit to the deposit account. + let amount = old_deposit + .saturating_sub(min_balance) + .min(OldCurrency::reducible_balance(&account, Preserve, Polite)); + + let new_deposit = OldCurrency::transfer( + &account, + &deposit_account, + amount, + ExistenceRequirement::KeepAlive, + ) + .map(|_| { + log::debug!( + target: LOG_TARGET, + "Transferred deposit ({:?}) to deposit account", + amount + ); + amount + }) + // If it fails we fallback to minting the ED. + .unwrap_or_else(|err| { + log::error!( + target: LOG_TARGET, + "Failed to transfer the base deposit, reason: {:?}", + err + ); + OldCurrency::deposit_creating(&deposit_account, min_balance); + min_balance + }); + + // Calculate the new base_deposit to store in the contract: + // Ideally, it should be the same as the old one + // Ideally, it should be at least 2xED (for the contract and deposit accounts). 
+ // It can't be more than the `new_deposit`. + let new_base_deposit = min( + max(contract.storage_base_deposit, min_balance.saturating_add(min_balance)), + new_deposit, + ); + + // Calculate the ratio to adjust storage_byte and storage_item deposits. + let new_deposit_without_base = new_deposit.saturating_sub(new_base_deposit); + let old_deposit_without_base = + old_deposit.saturating_sub(contract.storage_base_deposit); + let ratio = Perbill::from_rational(new_deposit_without_base, old_deposit_without_base); + + // Calculate the new storage deposits based on the ratio + let storage_byte_deposit = ratio.mul_ceil(contract.storage_byte_deposit); + let storage_item_deposit = ratio.mul_ceil(contract.storage_item_deposit); + + // Recalculate the new base deposit, instead of using new_base_deposit to avoid rounding + // errors + let storage_base_deposit = new_deposit + .saturating_sub(storage_byte_deposit) + .saturating_sub(storage_item_deposit); + + let new_contract_info = ContractInfo { + trie_id: contract.trie_id, + deposit_account, + code_hash: contract.code_hash, + storage_bytes: contract.storage_bytes, + storage_items: contract.storage_items, + storage_byte_deposit, + storage_item_deposit, + storage_base_deposit, + }; + + ContractInfoOf::::insert(&account, new_contract_info); + + // Store last key for next migration step + self.last_account = Some(account); + + (IsFinished::No, T::WeightInfo::v10_migration_step()) + } else { + log::debug!(target: LOG_TARGET, "Done Migrating contract info"); + (IsFinished::Yes, T::WeightInfo::v10_migration_step()) + } + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade_step() -> Result, TryRuntimeError> { + let sample: Vec<_> = old::ContractInfoOf::::iter().take(10).collect(); + + log::debug!(target: LOG_TARGET, "Taking sample of {} contracts", sample.len()); + Ok(sample.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { + let sample = )> as Decode>::decode( + 
&mut &state[..], + ) + .expect("pre_upgrade_step provides a valid state; qed"); + + log::debug!(target: LOG_TARGET, "Validating sample of {} contracts", sample.len()); + for (account, old_contract) in sample { + log::debug!(target: LOG_TARGET, "==="); + log::debug!(target: LOG_TARGET, "Account: 0x{} ", HexDisplay::from(&account.encode())); + let contract = ContractInfoOf::::get(&account).unwrap(); + ensure!(old_contract.trie_id == contract.trie_id, "invalid trie_id"); + ensure!(old_contract.code_hash == contract.code_hash, "invalid code_hash"); + ensure!(old_contract.storage_bytes == contract.storage_bytes, "invalid storage_bytes"); + ensure!(old_contract.storage_items == contract.storage_items, "invalid storage_items"); + + let deposit = >::total_balance( + &contract.deposit_account, + ); + ensure!( + deposit == + contract + .storage_base_deposit + .saturating_add(contract.storage_item_deposit) + .saturating_add(contract.storage_byte_deposit), + "deposit mismatch" + ); + } + + Ok(()) + } +} diff --git a/frame/contracts/src/migration/v11.rs b/frame/contracts/src/migration/v11.rs new file mode 100644 index 0000000000000..8123d73aee560 --- /dev/null +++ b/frame/contracts/src/migration/v11.rs @@ -0,0 +1,133 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Overflowing bounded DeletionQueue. +//! See . 
+ +use crate::{ + migration::{IsFinished, MigrationStep}, + weights::WeightInfo, + Config, Pallet, TrieId, Weight, LOG_TARGET, +}; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + +use codec::{Decode, Encode}; +use frame_support::{codec, pallet_prelude::*, storage_alias, DefaultNoBound}; +use sp_std::{marker::PhantomData, prelude::*}; +mod old { + use super::*; + + #[derive(Encode, Decode, TypeInfo, MaxEncodedLen)] + pub struct DeletedContract { + pub(crate) trie_id: TrieId, + } + + #[storage_alias] + pub type DeletionQueue = StorageValue, Vec>; +} + +#[derive(Encode, Decode, TypeInfo, MaxEncodedLen, DefaultNoBound, Clone)] +#[scale_info(skip_type_params(T))] +pub struct DeletionQueueManager { + insert_counter: u32, + delete_counter: u32, + _phantom: PhantomData, +} + +#[cfg(any(feature = "runtime-benchmarks", feature = "try-runtime"))] +pub fn fill_old_queue(len: usize) { + let queue: Vec = + core::iter::repeat_with(|| old::DeletedContract { trie_id: Default::default() }) + .take(len) + .collect(); + old::DeletionQueue::::set(Some(queue)); +} + +#[storage_alias] +type DeletionQueue = StorageMap, Twox64Concat, u32, TrieId>; + +#[storage_alias] +type DeletionQueueCounter = StorageValue, DeletionQueueManager, ValueQuery>; + +#[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] +pub struct Migration { + _phantom: PhantomData, +} + +impl MigrationStep for Migration { + const VERSION: u16 = 11; + + // It would be more correct to make our use the now removed [DeletionQueueDepth](https://github.com/paritytech/substrate/pull/13702/files#diff-70e9723e9db62816e35f6f885b6770a8449c75a6c2733e9fa7a245fe52c4656c) + // but in practice the queue is always empty, so 128 is a good enough approximation for not + // underestimating the weight of our migration. 
+ fn max_step_weight() -> Weight { + T::WeightInfo::v11_migration_step(128) + } + + fn step(&mut self) -> (IsFinished, Weight) { + let Some(old_queue) = old::DeletionQueue::::take() else { + return (IsFinished::Yes, Weight::zero()) + }; + let len = old_queue.len(); + + log::debug!( + target: LOG_TARGET, + "Migrating deletion queue with {} deleted contracts", + old_queue.len() + ); + + if !old_queue.is_empty() { + let mut queue = DeletionQueueManager::::default(); + for contract in old_queue { + >::insert(queue.insert_counter, contract.trie_id); + queue.insert_counter += 1; + } + + >::set(queue); + } + + (IsFinished::Yes, T::WeightInfo::v11_migration_step(len as u32)) + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade_step() -> Result, TryRuntimeError> { + let old_queue = old::DeletionQueue::::take().unwrap_or_default(); + + if old_queue.is_empty() { + let len = 10u32; + log::debug!( + target: LOG_TARGET, + "Injecting {len} entries to deletion queue to test migration" + ); + fill_old_queue::(len as usize); + return Ok(len.encode()) + } + + Ok((old_queue.len() as u32).encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { + let len = ::decode(&mut &state[..]) + .expect("pre_upgrade_step provides a valid state; qed"); + let counter = >::get(); + ensure!(counter.insert_counter == len, "invalid insert counter"); + ensure!(counter.delete_counter == 0, "invalid delete counter"); + Ok(()) + } +} diff --git a/frame/contracts/src/migration/v12.rs b/frame/contracts/src/migration/v12.rs new file mode 100644 index 0000000000000..75018f943d100 --- /dev/null +++ b/frame/contracts/src/migration/v12.rs @@ -0,0 +1,348 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Move `OwnerInfo` to `CodeInfo`, add `determinism` field to the latter, clear `CodeStorage` and +//! repay deposits. + +use crate::{ + migration::{IsFinished, MigrationStep}, + weights::WeightInfo, + AccountIdOf, BalanceOf, CodeHash, Config, Determinism, Pallet, Weight, LOG_TARGET, +}; +use codec::{Decode, Encode}; +use frame_support::{ + codec, pallet_prelude::*, storage_alias, traits::ReservableCurrency, DefaultNoBound, Identity, +}; +use scale_info::prelude::format; +use sp_core::hexdisplay::HexDisplay; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; +use sp_runtime::{traits::Zero, FixedPointNumber, FixedU128, Saturating}; +use sp_std::prelude::*; + +mod old { + use super::*; + + pub type BalanceOf = ::AccountId, + >>::Balance; + + #[derive(Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)] + #[codec(mel_bound())] + #[scale_info(skip_type_params(T, OldCurrency))] + pub struct OwnerInfo + where + OldCurrency: ReservableCurrency<::AccountId>, + { + pub owner: AccountIdOf, + #[codec(compact)] + pub deposit: BalanceOf, + #[codec(compact)] + pub refcount: u64, + } + + #[derive(Encode, Decode, scale_info::TypeInfo)] + #[codec(mel_bound())] + #[scale_info(skip_type_params(T))] + pub struct PrefabWasmModule { + #[codec(compact)] + pub instruction_weights_version: u32, + #[codec(compact)] + pub initial: u32, + #[codec(compact)] + pub maximum: u32, + pub code: Vec, + pub determinism: Determinism, + } + + #[storage_alias] + pub type OwnerInfoOf = + StorageMap, Identity, CodeHash, OwnerInfo>; + + #[storage_alias] + pub type 
CodeStorage = + StorageMap, Identity, CodeHash, PrefabWasmModule>; +} + +#[derive(Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)] +#[codec(mel_bound())] +#[scale_info(skip_type_params(T, OldCurrency))] +pub struct CodeInfo +where + OldCurrency: ReservableCurrency<::AccountId>, +{ + owner: AccountIdOf, + #[codec(compact)] + deposit: old::BalanceOf, + #[codec(compact)] + refcount: u64, + determinism: Determinism, + code_len: u32, +} + +#[storage_alias] +pub type CodeInfoOf = + StorageMap, Twox64Concat, CodeHash, CodeInfo>; + +#[storage_alias] +pub type PristineCode = StorageMap, Identity, CodeHash, Vec>; + +#[cfg(feature = "runtime-benchmarks")] +pub fn store_old_dummy_code(len: usize, account: T::AccountId) +where + OldCurrency: ReservableCurrency<::AccountId> + 'static, +{ + use sp_runtime::traits::Hash; + + let code = vec![42u8; len]; + let hash = T::Hashing::hash(&code); + PristineCode::::insert(hash, code.clone()); + + let module = old::PrefabWasmModule { + instruction_weights_version: Default::default(), + initial: Default::default(), + maximum: Default::default(), + code, + determinism: Determinism::Enforced, + }; + old::CodeStorage::::insert(hash, module); + + let info = old::OwnerInfo { owner: account, deposit: u32::MAX.into(), refcount: u64::MAX }; + old::OwnerInfoOf::::insert(hash, info); +} + +#[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] +pub struct Migration +where + OldCurrency: ReservableCurrency<::AccountId>, + OldCurrency::Balance: From>, +{ + last_code_hash: Option>, + _phantom: PhantomData, +} + +impl MigrationStep for Migration +where + OldCurrency: ReservableCurrency<::AccountId> + 'static, + OldCurrency::Balance: From>, +{ + const VERSION: u16 = 12; + + fn max_step_weight() -> Weight { + T::WeightInfo::v12_migration_step(T::MaxCodeLen::get()) + } + + fn step(&mut self) -> (IsFinished, Weight) { + let mut iter = if let Some(last_key) = self.last_code_hash.take() { + old::OwnerInfoOf::::iter_from( + 
old::OwnerInfoOf::::hashed_key_for(last_key), + ) + } else { + old::OwnerInfoOf::::iter() + }; + if let Some((hash, old_info)) = iter.next() { + log::debug!(target: LOG_TARGET, "Migrating OwnerInfo for code_hash {:?}", hash); + + let module = old::CodeStorage::::take(hash) + .expect(format!("No PrefabWasmModule found for code_hash: {:?}", hash).as_str()); + + let code_len = module.code.len(); + // We print this to measure the impact of the migration. + // Storage removed: deleted PrefabWasmModule's encoded len. + // Storage added: determinism field encoded len (as all other CodeInfo fields are the + // same as in the deleted OwnerInfo). + log::debug!(target: LOG_TARGET, "Storage removed: 1 item, {} bytes", &code_len,); + + // Storage usage prices could change over time, and accounts who uploaded their + // contracts code before the storage deposits where introduced, had not been ever + // charged with any deposit for that (see migration v6). + // + // This is why deposit to be refunded here is calculated as follows: + // + // 1. Calculate the deposit amount for storage before the migration, given current + // prices. + // 2. Given current reserved deposit amount, calculate the correction factor. + // 3. Calculate the deposit amount for storage after the migration, given current + // prices. + // 4. Calculate real deposit amount to be reserved after the migration. 
+ let price_per_byte = T::DepositPerByte::get(); + let price_per_item = T::DepositPerItem::get(); + let bytes_before = module + .encoded_size() + .saturating_add(code_len) + .saturating_add(old::OwnerInfo::::max_encoded_len()) + as u32; + let items_before = 3u32; + let deposit_expected_before = price_per_byte + .saturating_mul(bytes_before.into()) + .saturating_add(price_per_item.saturating_mul(items_before.into())); + let ratio = FixedU128::checked_from_rational(old_info.deposit, deposit_expected_before) + .unwrap_or_default() + .min(FixedU128::from_u32(1)); + let bytes_after = + code_len.saturating_add(CodeInfo::::max_encoded_len()) as u32; + let items_after = 2u32; + let deposit_expected_after = price_per_byte + .saturating_mul(bytes_after.into()) + .saturating_add(price_per_item.saturating_mul(items_after.into())); + let deposit = ratio.saturating_mul_int(deposit_expected_after); + + let info = CodeInfo:: { + determinism: module.determinism, + owner: old_info.owner, + deposit: deposit.into(), + refcount: old_info.refcount, + code_len: code_len as u32, + }; + + let amount = old_info.deposit.saturating_sub(info.deposit); + if !amount.is_zero() { + OldCurrency::unreserve(&info.owner, amount); + log::debug!( + target: LOG_TARGET, + "Deposit refunded: {:?} Balance, to: {:?}", + &amount, + HexDisplay::from(&info.owner.encode()) + ); + } else { + log::warn!( + target: LOG_TARGET, + "new deposit: {:?} >= old deposit: {:?}", + &info.deposit, + &old_info.deposit + ); + } + CodeInfoOf::::insert(hash, info); + + self.last_code_hash = Some(hash); + + (IsFinished::No, T::WeightInfo::v12_migration_step(code_len as u32)) + } else { + log::debug!(target: LOG_TARGET, "No more OwnerInfo to migrate"); + (IsFinished::Yes, T::WeightInfo::v12_migration_step(0)) + } + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade_step() -> Result, TryRuntimeError> { + let len = 100; + log::debug!(target: LOG_TARGET, "Taking sample of {} OwnerInfo(s)", len); + let sample: Vec<_> = 
old::OwnerInfoOf::::iter() + .take(len) + .map(|(k, v)| { + let module = old::CodeStorage::::get(k) + .expect("No PrefabWasmModule found for code_hash: {:?}"); + let info: CodeInfo = CodeInfo { + determinism: module.determinism, + deposit: v.deposit, + refcount: v.refcount, + owner: v.owner, + code_len: module.code.len() as u32, + }; + (k, info) + }) + .collect(); + + let storage: u32 = + old::CodeStorage::::iter().map(|(_k, v)| v.encoded_size() as u32).sum(); + let mut deposit: old::BalanceOf = Default::default(); + old::OwnerInfoOf::::iter().for_each(|(_k, v)| deposit += v.deposit); + + Ok((sample, deposit, storage).encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { + let state = <( + Vec<(CodeHash, CodeInfo)>, + old::BalanceOf, + u32, + ) as Decode>::decode(&mut &state[..]) + .unwrap(); + + log::debug!(target: LOG_TARGET, "Validating state of {} Codeinfo(s)", state.0.len()); + for (hash, old) in state.0 { + let info = CodeInfoOf::::get(&hash) + .expect(format!("CodeInfo for code_hash {:?} not found!", hash).as_str()); + ensure!(info.determinism == old.determinism, "invalid determinism"); + ensure!(info.owner == old.owner, "invalid owner"); + ensure!(info.refcount == old.refcount, "invalid refcount"); + } + + if let Some((k, _)) = old::CodeStorage::::iter().next() { + log::warn!( + target: LOG_TARGET, + "CodeStorage is still NOT empty, found code_hash: {:?}", + k + ); + } else { + log::debug!(target: LOG_TARGET, "CodeStorage is empty."); + } + if let Some((k, _)) = old::OwnerInfoOf::::iter().next() { + log::warn!( + target: LOG_TARGET, + "OwnerInfoOf is still NOT empty, found code_hash: {:?}", + k + ); + } else { + log::debug!(target: LOG_TARGET, "OwnerInfoOf is empty."); + } + + let mut deposit: old::BalanceOf = Default::default(); + let mut items = 0u32; + let mut storage_info = 0u32; + CodeInfoOf::::iter().for_each(|(_k, v)| { + deposit += v.deposit; + items += 1; + storage_info += 
v.encoded_size() as u32; + }); + let mut storage_code = 0u32; + PristineCode::::iter().for_each(|(_k, v)| { + storage_code += v.len() as u32; + }); + let (_, old_deposit, storage_module) = state; + // CodeInfoOf::max_encoded_len == OwnerInfoOf::max_encoded_len + 1 + // I.e. code info adds up 1 byte per record. + let info_bytes_added = items.clone(); + // We removed 1 PrefabWasmModule, and added 1 byte of determinism flag, per contract code. + let storage_removed = storage_module.saturating_sub(info_bytes_added); + // module+code+info - bytes + let storage_was = storage_module + .saturating_add(storage_code) + .saturating_add(storage_info) + .saturating_sub(info_bytes_added); + // We removed 1 storage item (PrefabWasmMod) for every stored contract code (was stored 3 + // items per code). + let items_removed = items; + log::info!( + target: LOG_TARGET, + "Storage freed, bytes: {} (of {}), items: {} (of {})", + storage_removed, + storage_was, + items_removed, + items_removed * 3, + ); + log::info!( + target: LOG_TARGET, + "Deposits returned, total: {:?} Balance (of {:?} Balance)", + old_deposit.saturating_sub(deposit), + old_deposit, + ); + + Ok(()) + } +} diff --git a/frame/contracts/src/migration/v13.rs b/frame/contracts/src/migration/v13.rs new file mode 100644 index 0000000000000..ade837f46e9b1 --- /dev/null +++ b/frame/contracts/src/migration/v13.rs @@ -0,0 +1,134 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Add `delegate_dependencies` to `ContractInfo`. +//! See . + +use crate::{ + migration::{IsFinished, MigrationStep}, + storage::DepositAccount, + weights::WeightInfo, + BalanceOf, CodeHash, Config, Pallet, TrieId, Weight, LOG_TARGET, +}; +use codec::{Decode, Encode}; +use frame_support::{codec, pallet_prelude::*, storage_alias, DefaultNoBound}; +use sp_runtime::BoundedBTreeMap; +use sp_std::prelude::*; + +mod old { + use crate::storage::DepositAccount; + + use super::*; + + #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] + #[scale_info(skip_type_params(T))] + pub struct ContractInfo { + pub trie_id: TrieId, + pub deposit_account: DepositAccount, + pub code_hash: CodeHash, + pub storage_bytes: u32, + pub storage_items: u32, + pub storage_byte_deposit: BalanceOf, + pub storage_item_deposit: BalanceOf, + pub storage_base_deposit: BalanceOf, + } + + #[storage_alias] + pub type ContractInfoOf = StorageMap< + Pallet, + Twox64Concat, + ::AccountId, + ContractInfo, + >; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn store_old_contract_info(account: T::AccountId, info: crate::ContractInfo) { + let info = old::ContractInfo { + trie_id: info.trie_id.clone(), + deposit_account: info.deposit_account().clone(), + code_hash: info.code_hash, + storage_bytes: Default::default(), + storage_items: Default::default(), + storage_byte_deposit: Default::default(), + storage_item_deposit: Default::default(), + storage_base_deposit: Default::default(), + }; + old::ContractInfoOf::::insert(account, info); +} + +#[storage_alias] +pub type ContractInfoOf = + StorageMap, Twox64Concat, ::AccountId, ContractInfo>; + +#[derive(Encode, Decode, CloneNoBound, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[scale_info(skip_type_params(T))] +pub struct ContractInfo { + trie_id: TrieId, + deposit_account: DepositAccount, + code_hash: 
CodeHash, + storage_bytes: u32, + storage_items: u32, + storage_byte_deposit: BalanceOf, + storage_item_deposit: BalanceOf, + storage_base_deposit: BalanceOf, + delegate_dependencies: BoundedBTreeMap, BalanceOf, T::MaxDelegateDependencies>, +} + +#[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] +pub struct Migration { + last_account: Option, +} + +impl MigrationStep for Migration { + const VERSION: u16 = 13; + + fn max_step_weight() -> Weight { + T::WeightInfo::v13_migration_step() + } + + fn step(&mut self) -> (IsFinished, Weight) { + let mut iter = if let Some(last_account) = self.last_account.take() { + old::ContractInfoOf::::iter_from(old::ContractInfoOf::::hashed_key_for( + last_account, + )) + } else { + old::ContractInfoOf::::iter() + }; + + if let Some((key, old)) = iter.next() { + log::debug!(target: LOG_TARGET, "Migrating contract {:?}", key); + let info = ContractInfo { + trie_id: old.trie_id, + deposit_account: old.deposit_account, + code_hash: old.code_hash, + storage_bytes: old.storage_bytes, + storage_items: old.storage_items, + storage_byte_deposit: old.storage_byte_deposit, + storage_item_deposit: old.storage_item_deposit, + storage_base_deposit: old.storage_base_deposit, + delegate_dependencies: Default::default(), + }; + ContractInfoOf::::insert(key.clone(), info); + self.last_account = Some(key); + (IsFinished::No, T::WeightInfo::v13_migration_step()) + } else { + log::debug!(target: LOG_TARGET, "No more contracts to migrate"); + (IsFinished::Yes, T::WeightInfo::v13_migration_step()) + } + } +} diff --git a/frame/contracts/src/migration/v14.rs b/frame/contracts/src/migration/v14.rs new file mode 100644 index 0000000000000..ebf97af562ed4 --- /dev/null +++ b/frame/contracts/src/migration/v14.rs @@ -0,0 +1,269 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Update the code owner balance, make the storage deposit reserved balance to be held instead. + +use crate::{ + exec::AccountIdOf, + migration::{IsFinished, MigrationStep}, + weights::WeightInfo, + BalanceOf, CodeHash, Config, Determinism, HoldReason, Pallet, Weight, LOG_TARGET, +}; +use codec::{Decode, Encode}; +#[cfg(feature = "try-runtime")] +use environmental::Vec; +#[cfg(feature = "try-runtime")] +use frame_support::traits::fungible::{Inspect, InspectHold}; +use frame_support::{ + codec, + pallet_prelude::*, + storage_alias, + traits::{fungible::MutateHold, ReservableCurrency}, + DefaultNoBound, +}; +use sp_core::hexdisplay::HexDisplay; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; +use sp_runtime::{traits::Zero, Saturating}; +#[cfg(feature = "try-runtime")] +use sp_std::collections::btree_map::BTreeMap; + +mod old { + use super::*; + + pub type BalanceOf = ::AccountId, + >>::Balance; + + #[derive(Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)] + #[codec(mel_bound())] + #[scale_info(skip_type_params(T, OldCurrency))] + pub struct CodeInfo + where + T: Config, + OldCurrency: ReservableCurrency<::AccountId>, + { + pub owner: AccountIdOf, + #[codec(compact)] + pub deposit: old::BalanceOf, + #[codec(compact)] + pub refcount: u64, + pub determinism: Determinism, + pub code_len: u32, + } + + #[storage_alias] + pub type CodeInfoOf = + 
StorageMap, Twox64Concat, CodeHash, CodeInfo>; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn store_dummy_code(account: T::AccountId) +where + T: Config, + OldCurrency: ReservableCurrency<::AccountId> + 'static, +{ + use sp_runtime::traits::Hash; + use sp_std::vec; + + let len = T::MaxCodeLen::get(); + let code = vec![42u8; len as usize]; + let hash = T::Hashing::hash(&code); + + let info = old::CodeInfo { + owner: account, + deposit: 10_000u32.into(), + refcount: u64::MAX, + determinism: Determinism::Enforced, + code_len: len, + }; + old::CodeInfoOf::::insert(hash, info); +} + +#[cfg(feature = "try-runtime")] +#[derive(Encode, Decode)] +/// Accounts for the balance allocation of a code owner. +struct BalanceAllocation +where + T: Config, + OldCurrency: ReservableCurrency<::AccountId>, +{ + /// Total reserved balance as storage deposit for the owner. + reserved: old::BalanceOf, + /// Total balance of the owner. + total: old::BalanceOf, +} + +#[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] +pub struct Migration +where + T: Config, + OldCurrency: ReservableCurrency<::AccountId>, +{ + last_code_hash: Option>, + _phantom: PhantomData<(T, OldCurrency)>, +} + +impl MigrationStep for Migration +where + T: Config, + OldCurrency: 'static + ReservableCurrency<::AccountId>, + BalanceOf: From, +{ + const VERSION: u16 = 14; + + fn max_step_weight() -> Weight { + T::WeightInfo::v14_migration_step() + } + + fn step(&mut self) -> (IsFinished, Weight) { + let mut iter = if let Some(last_hash) = self.last_code_hash.take() { + old::CodeInfoOf::::iter_from( + old::CodeInfoOf::::hashed_key_for(last_hash), + ) + } else { + old::CodeInfoOf::::iter() + }; + + if let Some((hash, code_info)) = iter.next() { + log::debug!(target: LOG_TARGET, "Migrating storage deposit for 0x{:?}", HexDisplay::from(&code_info.owner.encode())); + + let remaining = OldCurrency::unreserve(&code_info.owner, code_info.deposit); + + if remaining > Zero::zero() { + log::warn!( + target: LOG_TARGET, + 
"Code owner's account 0x{:?} for code {:?} has some non-unreservable deposit {:?} from a total of {:?} that will remain in reserved.", + HexDisplay::from(&code_info.owner.encode()), + hash, + remaining, + code_info.deposit + ); + } + + let unreserved = code_info.deposit.saturating_sub(remaining); + let amount = BalanceOf::::from(unreserved); + + log::debug!( + target: LOG_TARGET, + "Holding {:?} on the code owner's account 0x{:?} for code {:?}.", + amount, + HexDisplay::from(&code_info.owner.encode()), + hash, + ); + + T::Currency::hold( + &HoldReason::CodeUploadDepositReserve.into(), + &code_info.owner, + amount, + ) + .unwrap_or_else(|err| { + log::error!( + target: LOG_TARGET, + "Failed to hold {:?} from the code owner's account 0x{:?} for code {:?}, reason: {:?}.", + amount, + HexDisplay::from(&code_info.owner.encode()), + hash, + err + ); + }); + + self.last_code_hash = Some(hash); + (IsFinished::No, T::WeightInfo::v14_migration_step()) + } else { + log::debug!(target: LOG_TARGET, "No more storage deposit to migrate"); + (IsFinished::Yes, T::WeightInfo::v14_migration_step()) + } + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade_step() -> Result, TryRuntimeError> { + let info: Vec<_> = old::CodeInfoOf::::iter().collect(); + + let mut owner_balance_allocation = + BTreeMap::, BalanceAllocation>::new(); + + // Calculates the balance allocation by accumulating the storage deposits of all codes owned + // by an owner. 
+ for (_, code_info) in info { + owner_balance_allocation + .entry(code_info.owner.clone()) + .and_modify(|alloc| { + alloc.reserved = alloc.reserved.saturating_add(code_info.deposit); + }) + .or_insert(BalanceAllocation { + reserved: code_info.deposit, + total: OldCurrency::total_balance(&code_info.owner), + }); + } + + Ok(owner_balance_allocation.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { + let owner_balance_allocation = + , BalanceAllocation> as Decode>::decode( + &mut &state[..], + ) + .expect("pre_upgrade_step provides a valid state; qed"); + + let mut total_held: BalanceOf = Zero::zero(); + let count = owner_balance_allocation.len(); + for (owner, old_balance_allocation) in owner_balance_allocation { + let held = + T::Currency::balance_on_hold(&HoldReason::CodeUploadDepositReserve.into(), &owner); + log::debug!( + target: LOG_TARGET, + "Validating storage deposit for owner 0x{:?}, reserved: {:?}, held: {:?}", + HexDisplay::from(&owner.encode()), + old_balance_allocation.reserved, + held + ); + ensure!(held == old_balance_allocation.reserved.into(), "Held amount mismatch"); + + log::debug!( + target: LOG_TARGET, + "Validating total balance for owner 0x{:?}, new: {:?}, old: {:?}", + HexDisplay::from(&owner.encode()), + T::Currency::total_balance(&owner), + old_balance_allocation.total + ); + ensure!( + T::Currency::total_balance(&owner) == + BalanceOf::::decode(&mut &old_balance_allocation.total.encode()[..]) + .unwrap(), + "Balance mismatch " + ); + total_held += held; + } + + log::info!( + target: LOG_TARGET, + "Code owners processed: {:?}.", + count + ); + + log::info!( + target: LOG_TARGET, + "Total held amount for storage deposit: {:?}", + total_held + ); + + Ok(()) + } +} diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index c6eedb155d6a4..5ca18af026a4c 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -18,7 +18,7 
@@ //! This module contains the cost schedule and supporting code that constructs a //! sane default schedule from a `WeightInfo` implementation. -use crate::{wasm::Determinism, weights::WeightInfo, Config}; +use crate::{weights::WeightInfo, Config}; use codec::{Decode, Encode}; use frame_support::{weights::Weight, DefaultNoBound}; @@ -28,7 +28,6 @@ use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_runtime::RuntimeDebug; use sp_std::marker::PhantomData; -use wasm_instrument::{gas_metering, parity_wasm::elements}; /// Definition of the cost schedule and other parameterizations for the wasm vm. /// @@ -50,18 +49,12 @@ use wasm_instrument::{gas_metering, parity_wasm::elements}; /// .. Default::default() /// }, /// instruction_weights: InstructionWeights { -/// version: 5, /// .. Default::default() /// }, /// .. Default::default() /// } /// } /// ``` -/// -/// # Note -/// -/// Please make sure to bump the [`InstructionWeights::version`] whenever substantial -/// changes are made to its values. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(bound(serialize = "", deserialize = "")))] #[derive(Clone, Encode, Decode, PartialEq, Eq, ScheduleDebug, DefaultNoBound, TypeInfo)] @@ -78,12 +71,6 @@ pub struct Schedule { } /// Describes the upper limits on various metrics. -/// -/// # Note -/// -/// The values in this struct should never be decreased. The reason is that decreasing those -/// values will break existing contracts which are above the new limits when a -/// re-instrumentation is triggered. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct Limits { @@ -140,101 +127,15 @@ impl Limits { } } -/// Describes the weight for all categories of supported wasm instructions. -/// -/// There there is one field for each wasm instruction that describes the weight to -/// execute one instruction of that name. 
There are a few exceptions: -/// -/// 1. If there is a i64 and a i32 variant of an instruction we use the weight -/// of the former for both. -/// 2. The following instructions are free of charge because they merely structure the -/// wasm module and cannot be spammed without making the module invalid (and rejected): -/// End, Unreachable, Return, Else -/// 3. The following instructions cannot be benchmarked because they are removed by any -/// real world execution engine as a preprocessing step and therefore don't yield a -/// meaningful benchmark result. However, in contrast to the instructions mentioned -/// in 2. they can be spammed. We price them with the same weight as the "default" -/// instruction (i64.const): Block, Loop, Nop -/// 4. We price both i64.const and drop as InstructionWeights.i64const / 2. The reason -/// for that is that we cannot benchmark either of them on its own but we need their -/// individual values to derive (by subtraction) the weight of all other instructions -/// that use them as supporting instructions. Supporting means mainly pushing arguments -/// and dropping return values in order to maintain a valid module. +/// Gas metering of Wasm executed instructions is being done on the engine side. +/// This struct holds a reference value used to gas units scaling between host and engine. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Clone, Encode, Decode, PartialEq, Eq, ScheduleDebug, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct InstructionWeights { - /// Version of the instruction weights. - /// - /// # Note - /// - /// Should be incremented whenever any instruction weight is changed. The - /// reason is that changes to instruction weights require a re-instrumentation - /// in order to apply the changes to an already deployed code. The re-instrumentation - /// is triggered by comparing the version of the current schedule with the version the code was - /// instrumented with. 
Changes usually happen when pallet_contracts is re-benchmarked. - /// - /// Changes to other parts of the schedule should not increment the version in - /// order to avoid unnecessary re-instrumentations. - pub version: u32, - /// Weight to be used for instructions which don't have benchmarks assigned. - /// - /// This weight is used whenever a code is uploaded with [`Determinism::Relaxed`] - /// and an instruction (usually a float instruction) is encountered. This weight is **not** - /// used if a contract is uploaded with [`Determinism::Enforced`]. If this field is set to - /// `0` (the default) only deterministic codes are allowed to be uploaded. - pub fallback: u32, - pub i64const: u32, - pub i64load: u32, - pub i64store: u32, - pub select: u32, - pub r#if: u32, - pub br: u32, - pub br_if: u32, - pub br_table: u32, - pub br_table_per_entry: u32, - pub call: u32, - pub call_indirect: u32, - pub call_per_local: u32, - pub local_get: u32, - pub local_set: u32, - pub local_tee: u32, - pub global_get: u32, - pub global_set: u32, - pub memory_current: u32, - pub memory_grow: u32, - pub i64clz: u32, - pub i64ctz: u32, - pub i64popcnt: u32, - pub i64eqz: u32, - pub i64extendsi32: u32, - pub i64extendui32: u32, - pub i32wrapi64: u32, - pub i64eq: u32, - pub i64ne: u32, - pub i64lts: u32, - pub i64ltu: u32, - pub i64gts: u32, - pub i64gtu: u32, - pub i64les: u32, - pub i64leu: u32, - pub i64ges: u32, - pub i64geu: u32, - pub i64add: u32, - pub i64sub: u32, - pub i64mul: u32, - pub i64divs: u32, - pub i64divu: u32, - pub i64rems: u32, - pub i64remu: u32, - pub i64and: u32, - pub i64or: u32, - pub i64xor: u32, - pub i64shl: u32, - pub i64shrs: u32, - pub i64shru: u32, - pub i64rotl: u32, - pub i64rotr: u32, + /// Base instruction `ref_time` Weight. + /// Should match to wasmi's `1` fuel (see ). + pub base: u32, /// The type parameter is used in the default implementation. 
#[codec(skip)] pub _phantom: PhantomData, @@ -287,9 +188,6 @@ pub struct HostFnWeights { /// Weight of calling `seal_weight_to_fee`. pub weight_to_fee: Weight, - /// Weight of calling `gas`. - pub gas: Weight, - /// Weight of calling `seal_input`. pub input: Weight, @@ -431,6 +329,12 @@ pub struct HostFnWeights { /// Weight of calling `instantiation_nonce`. pub instantiation_nonce: Weight, + /// Weight of calling `add_delegate_dependency`. + pub add_delegate_dependency: Weight, + + /// Weight of calling `remove_delegate_dependency`. + pub remove_delegate_dependency: Weight, + /// The type parameter is used in the default implementation. #[codec(skip)] pub _phantom: PhantomData, @@ -492,63 +396,10 @@ impl Default for Limits { } impl Default for InstructionWeights { + /// We price both `i64.const` and `drop` as `instr_i64const / 2`. The reason + /// for that is that we cannot benchmark either of them on its own. fn default() -> Self { - Self { - version: 4, - fallback: 0, - i64const: cost_instr!(instr_i64const, 1), - i64load: cost_instr!(instr_i64load, 2), - i64store: cost_instr!(instr_i64store, 2), - select: cost_instr!(instr_select, 4), - r#if: cost_instr!(instr_if, 3), - br: cost_instr!(instr_br, 2), - br_if: cost_instr!(instr_br_if, 3), - br_table: cost_instr!(instr_br_table, 3), - br_table_per_entry: cost_instr!(instr_br_table_per_entry, 0), - call: cost_instr!(instr_call, 2), - call_indirect: cost_instr!(instr_call_indirect, 3), - call_per_local: cost_instr!(instr_call_per_local, 0), - local_get: cost_instr!(instr_local_get, 1), - local_set: cost_instr!(instr_local_set, 1), - local_tee: cost_instr!(instr_local_tee, 2), - global_get: cost_instr!(instr_global_get, 1), - global_set: cost_instr!(instr_global_set, 1), - memory_current: cost_instr!(instr_memory_current, 1), - memory_grow: cost_instr!(instr_memory_grow, 1), - i64clz: cost_instr!(instr_i64clz, 2), - i64ctz: cost_instr!(instr_i64ctz, 2), - i64popcnt: cost_instr!(instr_i64popcnt, 2), - i64eqz: 
cost_instr!(instr_i64eqz, 2), - i64extendsi32: cost_instr!(instr_i64extendsi32, 2), - i64extendui32: cost_instr!(instr_i64extendui32, 2), - i32wrapi64: cost_instr!(instr_i32wrapi64, 2), - i64eq: cost_instr!(instr_i64eq, 3), - i64ne: cost_instr!(instr_i64ne, 3), - i64lts: cost_instr!(instr_i64lts, 3), - i64ltu: cost_instr!(instr_i64ltu, 3), - i64gts: cost_instr!(instr_i64gts, 3), - i64gtu: cost_instr!(instr_i64gtu, 3), - i64les: cost_instr!(instr_i64les, 3), - i64leu: cost_instr!(instr_i64leu, 3), - i64ges: cost_instr!(instr_i64ges, 3), - i64geu: cost_instr!(instr_i64geu, 3), - i64add: cost_instr!(instr_i64add, 3), - i64sub: cost_instr!(instr_i64sub, 3), - i64mul: cost_instr!(instr_i64mul, 3), - i64divs: cost_instr!(instr_i64divs, 3), - i64divu: cost_instr!(instr_i64divu, 3), - i64rems: cost_instr!(instr_i64rems, 3), - i64remu: cost_instr!(instr_i64remu, 3), - i64and: cost_instr!(instr_i64and, 3), - i64or: cost_instr!(instr_i64or, 3), - i64xor: cost_instr!(instr_i64xor, 3), - i64shl: cost_instr!(instr_i64shl, 3), - i64shrs: cost_instr!(instr_i64shrs, 3), - i64shru: cost_instr!(instr_i64shru, 3), - i64rotl: cost_instr!(instr_i64rotl, 3), - i64rotr: cost_instr!(instr_i64rotr, 3), - _phantom: PhantomData, - } + Self { base: cost_instr!(instr_i64const, 1), _phantom: PhantomData } } } @@ -569,12 +420,6 @@ impl Default for HostFnWeights { block_number: cost!(seal_block_number), now: cost!(seal_now), weight_to_fee: cost!(seal_weight_to_fee), - // Manually remove proof size from basic block cost. - // - // Due to imperfect benchmarking some host functions incur a small - // amount of proof size. Usually this is ok. However, charging a basic block is such - // a frequent operation that this would be a vast overestimation. 
- gas: cost!(seal_gas).set_proof_size(0), input: cost!(seal_input), input_per_byte: cost!(seal_input_per_byte), r#return: cost!(seal_return), @@ -637,118 +482,13 @@ impl Default for HostFnWeights { reentrance_count: cost!(seal_reentrance_count), account_reentrance_count: cost!(seal_account_reentrance_count), instantiation_nonce: cost!(seal_instantiation_nonce), + add_delegate_dependency: cost!(add_delegate_dependency), + remove_delegate_dependency: cost!(remove_delegate_dependency), _phantom: PhantomData, } } } -struct ScheduleRules<'a, T: Config> { - schedule: &'a Schedule, - determinism: Determinism, -} - -impl Schedule { - pub(crate) fn rules(&self, determinism: Determinism) -> impl gas_metering::Rules + '_ { - ScheduleRules { schedule: self, determinism } - } -} - -impl<'a, T: Config> gas_metering::Rules for ScheduleRules<'a, T> { - fn instruction_cost(&self, instruction: &elements::Instruction) -> Option { - use self::elements::Instruction::*; - let w = &self.schedule.instruction_weights; - - let weight = match *instruction { - End | Unreachable | Return | Else => 0, - I32Const(_) | I64Const(_) | Block(_) | Loop(_) | Nop | Drop => w.i64const, - I32Load(_, _) | - I32Load8S(_, _) | - I32Load8U(_, _) | - I32Load16S(_, _) | - I32Load16U(_, _) | - I64Load(_, _) | - I64Load8S(_, _) | - I64Load8U(_, _) | - I64Load16S(_, _) | - I64Load16U(_, _) | - I64Load32S(_, _) | - I64Load32U(_, _) => w.i64load, - I32Store(_, _) | - I32Store8(_, _) | - I32Store16(_, _) | - I64Store(_, _) | - I64Store8(_, _) | - I64Store16(_, _) | - I64Store32(_, _) => w.i64store, - Select => w.select, - If(_) => w.r#if, - Br(_) => w.br, - BrIf(_) => w.br_if, - Call(_) => w.call, - GetLocal(_) => w.local_get, - SetLocal(_) => w.local_set, - TeeLocal(_) => w.local_tee, - GetGlobal(_) => w.global_get, - SetGlobal(_) => w.global_set, - CurrentMemory(_) => w.memory_current, - GrowMemory(_) => w.memory_grow, - CallIndirect(_, _) => w.call_indirect, - BrTable(ref data) => w - .br_table - 
.saturating_add(w.br_table_per_entry.saturating_mul(data.table.len() as u32)), - I32Clz | I64Clz => w.i64clz, - I32Ctz | I64Ctz => w.i64ctz, - I32Popcnt | I64Popcnt => w.i64popcnt, - I32Eqz | I64Eqz => w.i64eqz, - I64ExtendSI32 => w.i64extendsi32, - I64ExtendUI32 => w.i64extendui32, - I32WrapI64 => w.i32wrapi64, - I32Eq | I64Eq => w.i64eq, - I32Ne | I64Ne => w.i64ne, - I32LtS | I64LtS => w.i64lts, - I32LtU | I64LtU => w.i64ltu, - I32GtS | I64GtS => w.i64gts, - I32GtU | I64GtU => w.i64gtu, - I32LeS | I64LeS => w.i64les, - I32LeU | I64LeU => w.i64leu, - I32GeS | I64GeS => w.i64ges, - I32GeU | I64GeU => w.i64geu, - I32Add | I64Add => w.i64add, - I32Sub | I64Sub => w.i64sub, - I32Mul | I64Mul => w.i64mul, - I32DivS | I64DivS => w.i64divs, - I32DivU | I64DivU => w.i64divu, - I32RemS | I64RemS => w.i64rems, - I32RemU | I64RemU => w.i64remu, - I32And | I64And => w.i64and, - I32Or | I64Or => w.i64or, - I32Xor | I64Xor => w.i64xor, - I32Shl | I64Shl => w.i64shl, - I32ShrS | I64ShrS => w.i64shrs, - I32ShrU | I64ShrU => w.i64shru, - I32Rotl | I64Rotl => w.i64rotl, - I32Rotr | I64Rotr => w.i64rotr, - - // Returning None makes the gas instrumentation fail which we intend for - // unsupported or unknown instructions. Offchain we might allow indeterminism and hence - // use the fallback weight for those instructions. - _ if matches!(self.determinism, Determinism::Relaxed) && w.fallback > 0 => w.fallback, - _ => return None, - }; - Some(weight) - } - - fn memory_grow_cost(&self) -> gas_metering::MemoryGrowCost { - // We benchmarked the memory.grow instruction with the maximum allowed pages. - // The cost for growing is therefore already included in the instruction cost. 
- gas_metering::MemoryGrowCost::Free - } - - fn call_per_local_cost(&self) -> u32 { - self.schedule.instruction_weights.call_per_local - } -} - #[cfg(test)] mod test { use super::*; diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index 769caef0736fe..7fbd697e509dc 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -22,7 +22,7 @@ pub mod meter; use crate::{ exec::{AccountIdOf, Key}, weights::WeightInfo, - AddressGenerator, BalanceOf, CodeHash, Config, ContractInfoOf, DeletionQueue, + AddressGenerator, BalanceOf, CodeHash, CodeInfo, Config, ContractInfoOf, DeletionQueue, DeletionQueueCounter, Error, Pallet, TrieId, SENTINEL, }; use codec::{Decode, Encode, MaxEncodedLen}; @@ -30,19 +30,22 @@ use frame_support::{ dispatch::DispatchError, storage::child::{self, ChildInfo}, weights::Weight, - DefaultNoBound, RuntimeDebugNoBound, + CloneNoBound, DefaultNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; +use sp_core::Get; use sp_io::KillStorageResult; use sp_runtime::{ traits::{Hash, Saturating, Zero}, - RuntimeDebug, + BoundedBTreeMap, DispatchResult, RuntimeDebug, }; use sp_std::{marker::PhantomData, ops::Deref, prelude::*}; +use self::meter::Diff; + /// Information for managing an account and its sub trie abstraction. /// This is the required info to cache for an account. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, CloneNoBound, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] #[scale_info(skip_type_params(T))] pub struct ContractInfo { /// Unique ID for the subtree encoded as a bytes vector. @@ -66,6 +69,12 @@ pub struct ContractInfo { /// We need to store this information separately so it is not used when calculating any refunds /// since the base deposit can only ever be refunded on contract termination. storage_base_deposit: BalanceOf, + /// Map of code hashes and deposit balances. 
+ /// + /// Tracks the code hash and deposit held for adding delegate dependencies. Dependencies added + /// to the map can not be removed from the chain state and can be safely used for delegate + /// calls. + delegate_dependencies: BoundedBTreeMap, BalanceOf, T::MaxDelegateDependencies>, } impl ContractInfo { @@ -101,6 +110,7 @@ impl ContractInfo { storage_byte_deposit: Zero::zero(), storage_item_deposit: Zero::zero(), storage_base_deposit: Zero::zero(), + delegate_dependencies: Default::default(), }; Ok(contract) @@ -123,11 +133,16 @@ impl ContractInfo { .saturating_sub(Pallet::::min_balance()) } - /// Return the account that storage deposits should be deposited into. + /// Returns the account that storage deposits should be deposited into. pub fn deposit_account(&self) -> &DepositAccount { &self.deposit_account } + /// Returns the storage base deposit of the contract. + pub fn storage_base_deposit(&self) -> BalanceOf { + self.storage_base_deposit + } + /// Reads a storage kv pair of a contract. /// /// The read is performed from the `trie_id` only. The `address` is not necessary. If the @@ -201,6 +216,68 @@ impl ContractInfo { }) } + /// Sets and returns the contract base deposit. + /// + /// The base deposit is updated when the `code_hash` of the contract changes, as it depends on + /// the deposit paid to upload the contract's code. + pub fn update_base_deposit(&mut self, code_info: &CodeInfo) -> BalanceOf { + let ed = Pallet::::min_balance(); + let info_deposit = + Diff { bytes_added: self.encoded_size() as u32, items_added: 1, ..Default::default() } + .update_contract::(None) + .charge_or_zero(); + + // Instantiating the contract prevents its code to be deleted, therefore the base deposit + // includes a fraction (`T::CodeHashLockupDepositPercent`) of the original storage deposit + // to prevent abuse. 
+ let upload_deposit = T::CodeHashLockupDepositPercent::get().mul_ceil(code_info.deposit()); + + // Instantiate needs to transfer at least the minimum balance in order to pull the + // deposit account into existence. + // We also add another `ed` here which goes to the contract's own account into existence. + let deposit = info_deposit.saturating_add(upload_deposit).max(ed).saturating_add(ed); + + self.storage_base_deposit = deposit; + deposit + } + + /// Adds a new delegate dependency to the contract. + /// The `amount` is the amount of funds that will be reserved for the dependency. + /// + /// Returns an error if the maximum number of delegate_dependencies is reached or if + /// the delegate dependency already exists. + pub fn add_delegate_dependency( + &mut self, + code_hash: CodeHash, + amount: BalanceOf, + ) -> DispatchResult { + self.delegate_dependencies + .try_insert(code_hash, amount) + .map_err(|_| Error::::MaxDelegateDependenciesReached)? + .map_or(Ok(()), |_| Err(Error::::DelegateDependencyAlreadyExists)) + .map_err(Into::into) + } + + /// Removes the delegate dependency from the contract and returns the deposit held for this + /// dependency. + /// + /// Returns an error if the entry doesn't exist. + pub fn remove_delegate_dependency( + &mut self, + code_hash: &CodeHash, + ) -> Result, DispatchError> { + self.delegate_dependencies + .remove(code_hash) + .ok_or(Error::::DelegateDependencyNotFound.into()) + } + + /// Returns the delegate_dependencies of the contract. + pub fn delegate_dependencies( + &self, + ) -> &BoundedBTreeMap, BalanceOf, T::MaxDelegateDependencies> { + &self.delegate_dependencies + } + /// Push a contract's trie to the deletion queue for lazy removal. /// /// You must make sure that the contract is also removed when queuing the trie for deletion. 
diff --git a/frame/contracts/src/storage/meter.rs b/frame/contracts/src/storage/meter.rs
index 506f4f0d86649..93885b37b4795 100644
--- a/frame/contracts/src/storage/meter.rs
+++ b/frame/contracts/src/storage/meter.rs
@@ -19,26 +19,26 @@
 use crate::{
 	storage::{ContractInfo, DepositAccount},
-	BalanceOf, Config, Error, Inspect, Origin, Pallet, System,
+	BalanceOf, CodeInfo, Config, Error, Inspect, Origin, Pallet, StorageDeposit as Deposit, System,
 };
-use codec::Encode;
+
 use frame_support::{
 	dispatch::{fmt::Debug, DispatchError},
 	ensure,
 	traits::{
-		tokens::{Fortitude::Polite, Preservation::Protect, WithdrawConsequence},
-		Currency, ExistenceRequirement, Get,
+		fungible::Mutate,
+		tokens::{Fortitude::Polite, Preservation, WithdrawConsequence},
+		Get,
 	},
 	DefaultNoBound, RuntimeDebugNoBound,
 };
-use pallet_contracts_primitives::StorageDeposit as Deposit;
 use sp_runtime::{
 	traits::{Saturating, Zero},
 	FixedPointNumber, FixedU128,
 };
 use sp_std::{marker::PhantomData, vec::Vec};
 
-/// Deposit that uses the native currency's balance type.
+/// Deposit that uses the native fungible's balance type.
 pub type DepositOf = Deposit>;
 
 /// A production root storage meter that actually charges from its origin.
@@ -90,7 +90,7 @@ pub trait Ext {
 
 /// This [`Ext`] is used for actual on-chain execution when balance needs to be charged.
 ///
-/// It uses [`ReservableCurrency`] in order to do accomplish the reserves.
+/// It uses [`frame_support::traits::fungible::Mutate`] in order to accomplish the reserves.
 pub enum ReservingExt {}
 
 /// Used to implement a type state pattern for the meter.
@@ -398,7 +398,7 @@ where
 	T: Config,
 	E: Ext,
 {
-	/// Charge `diff` from the meter.
+	/// Charges `diff` from the meter.
 	pub fn charge(&mut self, diff: &Diff) {
 		match &mut self.own_contribution {
 			Contribution::Alive(own) => *own = own.saturating_add(diff),
@@ -406,49 +406,55 @@ where
 		};
 	}
 
-	/// Charge from `origin` a storage deposit for contract instantiation.
+	/// Adds a deposit charge.
+ /// + /// Use this method instead of [`Self::charge`] when the charge is not the result of a storage + /// change. This is the case when a `delegate_dependency` is added or removed, or when the + /// `code_hash` is updated. [`Self::charge`] cannot be used here because we keep track of the + /// deposit charge separately from the storage charge. + pub fn charge_deposit(&mut self, deposit_account: DepositAccount, amount: DepositOf) { + self.total_deposit = self.total_deposit.saturating_add(&amount); + self.charges.push(Charge { deposit_account, amount, terminated: false }); + } + + /// Charges from `origin` a storage deposit for contract instantiation. /// /// This immediately transfers the balance in order to create the account. pub fn charge_instantiate( &mut self, origin: &T::AccountId, contract: &T::AccountId, - info: &mut ContractInfo, + contract_info: &mut ContractInfo, + code_info: &CodeInfo, ) -> Result, DispatchError> { debug_assert!(self.is_alive()); - let ed = Pallet::::min_balance(); - let mut deposit = - Diff { bytes_added: info.encoded_size() as u32, items_added: 1, ..Default::default() } - .update_contract::(None); - - // Instantiate needs to transfer at least the minimum balance in order to pull the - // deposit account into existence. - // We also add another `ed` here which goes to the contract's own account into existence. - deposit = deposit.max(Deposit::Charge(ed)).saturating_add(&Deposit::Charge(ed)); - if deposit.charge_or_zero() > self.limit { + + let deposit = contract_info.update_base_deposit(&code_info); + if deposit > self.limit { return Err(>::StorageDepositLimitExhausted.into()) } + let deposit = Deposit::Charge(deposit); + // We do not increase `own_contribution` because this will be charged later when the // contract execution does conclude and hence would lead to a double charge. 
self.total_deposit = deposit.clone(); - info.storage_base_deposit = deposit.charge_or_zero(); // Normally, deposit charges are deferred to be able to coalesce them with refunds. // However, we need to charge immediately so that the account is created before // charges possibly below the ed are collected and fail. E::charge( origin, - info.deposit_account(), + contract_info.deposit_account(), &deposit.saturating_sub(&Deposit::Charge(ed)), false, )?; - System::::inc_consumers(info.deposit_account())?; + System::::inc_consumers(contract_info.deposit_account())?; // We also need to make sure that the contract's account itself exists. - T::Currency::transfer(origin, contract, ed, ExistenceRequirement::KeepAlive)?; + T::Currency::transfer(origin, contract, ed, Preservation::Preserve)?; System::::inc_consumers(contract)?; Ok(deposit) @@ -512,7 +518,7 @@ impl Ext for ReservingExt { // We are sending the `min_leftover` and the `min_balance` from the origin // account as part of a contract call. Hence origin needs to have those left over // as free balance after accounting for all deposits. 
- let max = T::Currency::reducible_balance(origin, Protect, Polite) + let max = T::Currency::reducible_balance(origin, Preservation::Preserve, Polite) .saturating_sub(min_leftover) .saturating_sub(Pallet::::min_balance()); let default = max.min(T::DefaultDepositLimit::get()); @@ -532,12 +538,11 @@ impl Ext for ReservingExt { terminated: bool, ) -> Result<(), DispatchError> { match amount { - Deposit::Charge(amount) => T::Currency::transfer( - origin, - deposit_account, - *amount, - ExistenceRequirement::KeepAlive, - ), + Deposit::Charge(amount) | Deposit::Refund(amount) if amount.is_zero() => return Ok(()), + Deposit::Charge(amount) => { + T::Currency::transfer(origin, deposit_account, *amount, Preservation::Preserve)?; + Ok(()) + }, Deposit::Refund(amount) => { if terminated { System::::dec_consumers(&deposit_account); @@ -546,9 +551,11 @@ impl Ext for ReservingExt { deposit_account, origin, *amount, - // We can safely use `AllowDeath` because our own consumer prevents an removal. - ExistenceRequirement::AllowDeath, - ) + // We can safely make it `Expendable` because our own consumer prevents a + // removal. + Preservation::Expendable, + )?; + Ok(()) }, } } @@ -664,6 +671,7 @@ mod tests { storage_byte_deposit: info.bytes_deposit, storage_item_deposit: info.items_deposit, storage_base_deposit: Default::default(), + delegate_dependencies: Default::default(), } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index c32999d0ade3a..3132b8e39f7da 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -15,20 +15,25 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use self::test_utils::hash; -use crate as pallet_contracts; +mod pallet_dummy; +mod unsafe_debug; + +use self::test_utils::{ensure_stored, expected_deposit, hash}; use crate::{ + self as pallet_contracts, chain_extension::{ ChainExtension, Environment, Ext, InitState, RegisteredChainExtension, Result as ExtensionResult, RetVal, ReturnFlags, SysConfig, }, exec::{Frame, Key}, + migration::codegen::LATEST_MIGRATION_VERSION, storage::DeletionQueueManager, tests::test_utils::{get_contract, get_contract_checked}, - wasm::{Determinism, PrefabWasmModule, ReturnCode as RuntimeReturnCode}, + wasm::{Determinism, ReturnCode as RuntimeReturnCode}, weights::WeightInfo, - BalanceOf, Code, CodeStorage, CollectEvents, Config, ContractInfo, ContractInfoOf, DebugInfo, - DefaultAddressGenerator, DeletionQueueCounter, Error, Origin, Pallet, Schedule, + BalanceOf, Code, CodeHash, CodeInfoOf, CollectEvents, Config, ContractInfo, ContractInfoOf, + DebugInfo, DefaultAddressGenerator, DeletionQueueCounter, Error, HoldReason, + MigrationInProgress, Origin, Pallet, PristineCode, Schedule, }; use assert_matches::assert_matches; use codec::Encode; @@ -38,39 +43,38 @@ use frame_support::{ parameter_types, storage::child, traits::{ - ConstU32, ConstU64, Contains, Currency, ExistenceRequirement, LockableCurrency, OnIdle, - OnInitialize, WithdrawReasons, + fungible::{BalancedHold, Inspect, InspectHold, Mutate, MutateHold}, + tokens::Preservation, + ConstU32, ConstU64, Contains, OnIdle, OnInitialize, StorageVersion, }, weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; use frame_system::{EventRecord, Phase}; +use pallet_contracts_primitives::CodeUploadReturnValue; use pretty_assertions::{assert_eq, assert_ne}; use sp_core::ByteArray; use sp_io::hashing::blake2_256; use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; use sp_runtime::{ - testing::{Header, H256}, + testing::H256, traits::{BlakeTwo256, Convert, Hash, IdentityLookup}, - AccountId32, TokenError, + AccountId32, 
BuildStorage, Perbill, TokenError, }; use std::ops::Deref; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Randomness: pallet_insecure_randomness_collective_flip::{Pallet, Storage}, Utility: pallet_utility::{Pallet, Call, Storage, Event}, - Contracts: pallet_contracts::{Pallet, Call, Storage, Event}, + Contracts: pallet_contracts::{Pallet, Call, Storage, Event, HoldReason}, Proxy: pallet_proxy::{Pallet, Call, Storage, Event}, + Dummy: pallet_dummy } ); @@ -82,16 +86,20 @@ macro_rules! assert_return_code { macro_rules! assert_refcount { ( $code_hash:expr , $should:expr $(,)? 
) => {{ - let is = crate::OwnerInfoOf::::get($code_hash).map(|m| m.refcount()).unwrap(); + let is = crate::CodeInfoOf::::get($code_hash).map(|m| m.refcount()).unwrap(); assert_eq!(is, $should); }}; } pub mod test_utils { - use super::{Balances, Hash, SysConfig, Test}; - use crate::{exec::AccountIdOf, CodeHash, Config, ContractInfo, ContractInfoOf, Nonce}; - use codec::Encode; - use frame_support::traits::Currency; + + use super::{DepositPerByte, DepositPerItem, Hash, SysConfig, Test}; + use crate::{ + exec::AccountIdOf, BalanceOf, CodeHash, CodeInfo, CodeInfoOf, Config, ContractInfo, + ContractInfoOf, Nonce, PristineCode, + }; + use codec::{Encode, MaxEncodedLen}; + use frame_support::traits::{fungible::Mutate, Currency}; pub fn place_contract(address: &AccountIdOf, code_hash: CodeHash) { let nonce = >::mutate(|counter| { @@ -99,15 +107,15 @@ pub mod test_utils { *counter }); set_balance(address, ::Currency::minimum_balance() * 10); + >::insert(code_hash, CodeInfo::new(address.clone())); let contract = >::new(&address, nonce, code_hash).unwrap(); >::insert(address, contract); } pub fn set_balance(who: &AccountIdOf, amount: u64) { - let imbalance = Balances::deposit_creating(who, amount); - drop(imbalance); + let _ = ::Currency::set_balance(who, amount); } pub fn get_balance(who: &AccountIdOf) -> u64 { - Balances::free_balance(who) + ::Currency::free_balance(who) } pub fn get_contract(addr: &AccountIdOf) -> ContractInfo { get_contract_checked(addr).unwrap() @@ -115,9 +123,26 @@ pub mod test_utils { pub fn get_contract_checked(addr: &AccountIdOf) -> Option> { ContractInfoOf::::get(addr) } + pub fn get_code_deposit(code_hash: &CodeHash) -> BalanceOf { + crate::CodeInfoOf::::get(code_hash).unwrap().deposit() + } pub fn hash(s: &S) -> <::Hashing as Hash>::Output { <::Hashing as Hash>::hash_of(s) } + pub fn expected_deposit(code_len: usize) -> u64 { + // For code_info, the deposit for max_encoded_len is taken. 
+ let code_info_len = CodeInfo::::max_encoded_len() as u64; + // Calculate deposit to be reserved. + // We add 2 storage items: one for code, other for code_info + DepositPerByte::get().saturating_mul(code_len as u64 + code_info_len) + + DepositPerItem::get().saturating_mul(2) + } + pub fn ensure_stored(code_hash: CodeHash) -> usize { + // Assert that code_info is stored + assert!(CodeInfoOf::::contains_key(&code_hash)); + // Assert that contract code is stored, and get its size. + PristineCode::::try_get(&code_hash).unwrap().len() + } } impl Test { @@ -173,6 +198,8 @@ impl ChainExtension for TestExtension { where E: Ext, { + use codec::Decode; + let func_id = env.func_id(); let id = env.ext_id() as u32 | func_id as u32; match func_id { @@ -192,7 +219,11 @@ impl ChainExtension for TestExtension { }, 2 => { let mut env = env.buf_in_buf_out(); - let weight = Weight::from_parts(env.read(5)?[4].into(), 0); + let mut enc = &env.read(9)?[4..8]; + let weight = Weight::from_parts( + u32::decode(&mut enc).map_err(|_| Error::::ContractTrapped)?.into(), + 0, + ); env.charge_weight(weight)?; Ok(RetVal::Converging(id)) }, @@ -289,14 +320,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = AccountId32; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -322,8 +352,8 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); - type MaxHolds = (); + type RuntimeHoldReason = RuntimeHoldReason; + type MaxHolds = ConstU32<1>; } impl pallet_timestamp::Config for Test { @@ -354,14 +384,18 @@ impl pallet_proxy::Config for Test { type AnnouncementDepositFactor = 
ConstU64<1>; } +impl pallet_dummy::Config for Test {} + parameter_types! { pub MySchedule: Schedule = { - let mut schedule = >::default(); - schedule.instruction_weights.fallback = 1; + let schedule = >::default(); schedule }; pub static DepositPerByte: BalanceOf = 1; pub const DepositPerItem: BalanceOf = 2; + pub static MaxDelegateDependencies: u32 = 32; + + pub static CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(0); // We need this one set high enough for running benchmarks. pub static DefaultDepositLimit: BalanceOf = 10_000_000; } @@ -427,6 +461,12 @@ impl Config for Test { type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = UnstableInterface; type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; + type RuntimeHoldReason = RuntimeHoldReason; + type Migrations = crate::migration::codegen::BenchMigrations; + type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; + type MaxDelegateDependencies = MaxDelegateDependencies; + #[cfg(feature = "unsafe-debug")] + type Debug = unsafe_debug::TestDebugger; } pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); @@ -438,32 +478,61 @@ pub const GAS_LIMIT: Weight = Weight::from_parts(100_000_000_000, 3 * 1024 * 102 pub struct ExtBuilder { existential_deposit: u64, + storage_version: Option, + code_hashes: Vec>, } + impl Default for ExtBuilder { fn default() -> Self { - Self { existential_deposit: ExistentialDeposit::get() } + Self { + existential_deposit: ExistentialDeposit::get(), + storage_version: None, + code_hashes: vec![], + } } } + impl ExtBuilder { pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { self.existential_deposit = existential_deposit; self } + pub fn with_code_hashes(mut self, code_hashes: Vec>) -> Self { + self.code_hashes = code_hashes; + self + } pub fn set_associated_consts(&self) { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); } + pub fn set_storage_version(mut self, version: u16) -> Self { + 
self.storage_version = Some(StorageVersion::new(version)); + self + } pub fn build(self) -> sp_io::TestExternalities { use env_logger::{Builder, Env}; let env = Env::new().default_filter_or("runtime=debug"); let _ = Builder::from_env(env).is_test(true).try_init(); self.set_associated_consts(); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![] } .assimilate_storage(&mut t) .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.register_extension(KeystoreExt::new(MemoryKeystore::new())); - ext.execute_with(|| System::set_block_number(1)); + ext.execute_with(|| { + use frame_support::traits::OnGenesis; + + Pallet::::on_genesis(); + if let Some(storage_version) = self.storage_version { + storage_version.put::>(); + } + System::set_block_number(1) + }); + ext.execute_with(|| { + for code_hash in self.code_hashes { + CodeInfoOf::::insert(code_hash, crate::CodeInfo::new(ALICE)); + } + }); ext } } @@ -528,7 +597,7 @@ impl Default for Origin { #[test] fn calling_plain_account_fails() { ExtBuilder::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 100_000_000); + let _ = ::Currency::set_balance(&ALICE, 100_000_000); let base_cost = <::WeightInfo as WeightInfo>::call(); assert_eq!( @@ -544,12 +613,89 @@ fn calling_plain_account_fails() { }); } +#[test] +fn migration_on_idle_hooks_works() { + // Defines expectations of how many migration steps can be done given the weight limit. 
+ let tests = [ + (Weight::zero(), LATEST_MIGRATION_VERSION - 2), + (::WeightInfo::migrate() + 1.into(), LATEST_MIGRATION_VERSION - 1), + (Weight::MAX, LATEST_MIGRATION_VERSION), + ]; + + for (weight, expected_version) in tests { + ExtBuilder::default() + .set_storage_version(LATEST_MIGRATION_VERSION - 2) + .build() + .execute_with(|| { + MigrationInProgress::::set(Some(Default::default())); + Contracts::on_idle(System::block_number(), weight); + assert_eq!(StorageVersion::get::>(), expected_version); + }); + } +} + +#[test] +fn migration_in_progress_works() { + let (wasm, code_hash) = compile_module::("dummy").unwrap(); + + ExtBuilder::default().existential_deposit(1).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + MigrationInProgress::::set(Some(Default::default())); + + assert_err!( + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + vec![], + None, + Determinism::Enforced + ), + Error::::MigrationInProgress, + ); + assert_err!( + Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash), + Error::::MigrationInProgress, + ); + assert_err!( + Contracts::set_code(RuntimeOrigin::signed(ALICE), BOB.clone(), code_hash), + Error::::MigrationInProgress, + ); + assert_err_ignore_postinfo!( + Contracts::call(RuntimeOrigin::signed(ALICE), BOB, 0, GAS_LIMIT, None, vec![],), + Error::::MigrationInProgress, + ); + assert_err_ignore_postinfo!( + Contracts::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + 100_000, + GAS_LIMIT, + None, + wasm, + vec![], + vec![], + ), + Error::::MigrationInProgress, + ); + assert_err_ignore_postinfo!( + Contracts::instantiate( + RuntimeOrigin::signed(ALICE), + 100_000, + GAS_LIMIT, + None, + code_hash, + vec![], + vec![], + ), + Error::::MigrationInProgress, + ); + }); +} + #[test] fn instantiate_and_call_and_deposit_event() { let (wasm, code_hash) = compile_module::("event_and_return_on_deploy").unwrap(); ExtBuilder::default().existential_deposit(1).build().execute_with(|| { - let _ = 
Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let min_balance = ::Currency::minimum_balance(); let value = 100; @@ -599,7 +745,7 @@ fn instantiate_and_call_and_deposit_event() { phase: Phase::Initialization, event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { account: deposit_account.clone(), - free_balance: 131, + free_balance: 132, }), topics: vec![], }, @@ -608,7 +754,7 @@ fn instantiate_and_call_and_deposit_event() { event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: ALICE, to: deposit_account.clone(), - amount: 131, + amount: 132, }), topics: vec![], }, @@ -672,7 +818,7 @@ fn deposit_event_max_value_limit() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let addr = Contracts::bare_instantiate( ALICE, 30_000, @@ -713,12 +859,13 @@ fn deposit_event_max_value_limit() { }); } +// Fail out of fuel (ref_time weight) in the engine. #[test] -fn run_out_of_gas() { +fn run_out_of_fuel_engine() { let (wasm, _code_hash) = compile_module::("run_out_of_gas").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let addr = Contracts::bare_instantiate( ALICE, @@ -751,6 +898,155 @@ fn run_out_of_gas() { }); } +// Fail out of fuel (ref_time weight) in the host. 
+#[test] +fn run_out_of_fuel_host() { + let (code, _hash) = compile_module::("chain_extension").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = ::Currency::minimum_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + + let addr = Contracts::bare_instantiate( + ALICE, + min_balance * 100, + GAS_LIMIT, + None, + Code::Upload(code), + vec![], + vec![], + DebugInfo::Skip, + CollectEvents::Skip, + ) + .result + .unwrap() + .account_id; + + let gas_limit = Weight::from_parts(u32::MAX as u64, GAS_LIMIT.proof_size()); + + // Use chain extension to charge more ref_time than it is available. + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + gas_limit, + None, + ExtensionInput { extension_id: 0, func_id: 2, extra: &u32::MAX.encode() }.into(), + DebugInfo::Skip, + CollectEvents::Skip, + Determinism::Enforced, + ) + .result; + assert_err!(result, >::OutOfGas); + }); +} + +#[test] +fn gas_syncs_work() { + let (wasm0, _code_hash) = compile_module::("seal_input_noop").unwrap(); + let (wasm1, _code_hash) = compile_module::("seal_input_once").unwrap(); + let (wasm2, _code_hash) = compile_module::("seal_input_twice").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + // Instantiate noop contract. + let addr0 = Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(wasm0), + vec![], + vec![], + DebugInfo::Skip, + CollectEvents::Skip, + ) + .result + .unwrap() + .account_id; + + // Instantiate 1st contract. + let addr1 = Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(wasm1), + vec![], + vec![], + DebugInfo::Skip, + CollectEvents::Skip, + ) + .result + .unwrap() + .account_id; + + // Instantiate 2nd contract. 
+ let addr2 = Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(wasm2), + vec![], + vec![], + DebugInfo::Skip, + CollectEvents::Skip, + ) + .result + .unwrap() + .account_id; + + let result = Contracts::bare_call( + ALICE, + addr0, + 0, + GAS_LIMIT, + None, + 1u8.to_le_bytes().to_vec(), + DebugInfo::Skip, + CollectEvents::Skip, + Determinism::Enforced, + ); + assert_ok!(result.result); + let engine_consumed_noop = result.gas_consumed.ref_time(); + + let result = Contracts::bare_call( + ALICE, + addr1, + 0, + GAS_LIMIT, + None, + 1u8.to_le_bytes().to_vec(), + DebugInfo::Skip, + CollectEvents::Skip, + Determinism::Enforced, + ); + assert_ok!(result.result); + let gas_consumed_once = result.gas_consumed.ref_time(); + let host_consumed_once = ::Schedule::get().host_fn_weights.input.ref_time(); + let engine_consumed_once = gas_consumed_once - host_consumed_once - engine_consumed_noop; + + let result = Contracts::bare_call( + ALICE, + addr2, + 0, + GAS_LIMIT, + None, + 1u8.to_le_bytes().to_vec(), + DebugInfo::Skip, + CollectEvents::Skip, + Determinism::Enforced, + ); + assert_ok!(result.result); + let gas_consumed_twice = result.gas_consumed.ref_time(); + let host_consumed_twice = host_consumed_once * 2; + let engine_consumed_twice = gas_consumed_twice - host_consumed_twice - engine_consumed_noop; + + // Second contract just repeats first contract's instructions twice. + // If runtime syncs gas with the engine properly, this should pass. + assert_eq!(engine_consumed_twice, engine_consumed_once * 2); + }); +} + /// Check that contracts with the same account id have different trie ids. /// Check the `Nonce` storage item for more information. 
#[test] @@ -758,7 +1054,7 @@ fn instantiate_unique_trie_id() { let (wasm, code_hash) = compile_module::("self_destruct").unwrap(); ExtBuilder::default().existential_deposit(500).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, None, Determinism::Enforced) .unwrap(); @@ -825,7 +1121,7 @@ fn storage_max_value_limit() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let addr = Contracts::bare_instantiate( ALICE, 30_000, @@ -876,7 +1172,7 @@ fn deploy_and_call_other_contract() { let min_balance = ::Currency::minimum_balance(); // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let caller_addr = Contracts::bare_instantiate( ALICE, 100_000, @@ -931,7 +1227,7 @@ fn deploy_and_call_other_contract() { phase: Phase::Initialization, event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { account: deposit_account.clone(), - free_balance: 131, + free_balance: 132, }), topics: vec![], }, @@ -940,7 +1236,7 @@ fn deploy_and_call_other_contract() { event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: ALICE, to: deposit_account.clone(), - amount: 131, + amount: 132, }), topics: vec![], }, @@ -1024,7 +1320,7 @@ fn delegate_call() { let (callee_wasm, callee_code_hash) = compile_module::("delegate_call_lib").unwrap(); ExtBuilder::default().existential_deposit(500).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the 'caller' let caller_addr = Contracts::bare_instantiate( @@ -1064,7 +1360,7 @@ fn delegate_call() { fn transfer_allow_death_cannot_kill_account() { let (wasm, _code_hash) = 
compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the BOB contract. let addr = Contracts::bare_instantiate( @@ -1088,11 +1384,11 @@ fn transfer_allow_death_cannot_kill_account() { let total_balance = ::Currency::total_balance(&addr); assert_err!( - <::Currency as Currency>::transfer( + <::Currency as Mutate>::transfer( &addr, &ALICE, total_balance, - ExistenceRequirement::AllowDeath, + Preservation::Expendable, ), TokenError::Frozen, ); @@ -1105,7 +1401,7 @@ fn transfer_allow_death_cannot_kill_account() { fn cannot_self_destruct_through_draning() { let (wasm, _code_hash) = compile_module::("drain").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the BOB contract. let addr = Contracts::bare_instantiate( @@ -1149,7 +1445,7 @@ fn cannot_self_destruct_through_draning() { fn cannot_self_destruct_through_storage_refund_after_price_change() { let (wasm, _code_hash) = compile_module::("store_call").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let min_balance = ::Currency::minimum_balance(); // Instantiate the BOB contract. @@ -1209,7 +1505,7 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { fn cannot_self_destruct_while_live() { let (wasm, _code_hash) = compile_module::("self_destruct").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the BOB contract. 
let addr = Contracts::bare_instantiate( @@ -1253,8 +1549,8 @@ fn cannot_self_destruct_while_live() { fn self_destruct_works() { let (wasm, code_hash) = compile_module::("self_destruct").unwrap(); ExtBuilder::default().existential_deposit(1_000).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let _ = Balances::deposit_creating(&DJANGO, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&DJANGO, 1_000_000); // Instantiate the BOB contract. let addr = Contracts::bare_instantiate( @@ -1289,11 +1585,11 @@ fn self_destruct_works() { // Check that account is gone assert!(get_contract_checked(&addr).is_none()); - assert_eq!(Balances::total_balance(&addr), 0); + assert_eq!(::Currency::total_balance(&addr), 0); // check that the beneficiary (django) got remaining balance let ed = ::Currency::minimum_balance(); - assert_eq!(Balances::free_balance(DJANGO), 1_000_000 + 100_000 + ed); + assert_eq!(::Currency::free_balance(DJANGO), 1_000_000 + 100_000 + ed); pretty_assertions::assert_eq!( System::events(), @@ -1360,7 +1656,7 @@ fn destroy_contract_and_transfer_funds() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create code hash for bob to instantiate - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); Contracts::bare_upload_code(ALICE, callee_wasm, None, Determinism::Enforced).unwrap(); // This deploys the BOB contract, which in turn deploys the CHARLIE contract during @@ -1404,7 +1700,7 @@ fn destroy_contract_and_transfer_funds() { fn cannot_self_destruct_in_constructor() { let (wasm, _) = compile_module::("self_destructing_constructor").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Fail to instantiate the BOB because the contructor calls seal_terminate. 
assert_err_ignore_postinfo!( @@ -1427,7 +1723,7 @@ fn crypto_hashes() { let (wasm, _code_hash) = compile_module::("crypto_hashes").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the CRYPTO_HASHES contract. let addr = Contracts::bare_instantiate( @@ -1490,7 +1786,7 @@ fn transfer_return_code() { let (wasm, _code_hash) = compile_module::("transfer_return_code").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let addr = Contracts::bare_instantiate( ALICE, @@ -1508,7 +1804,7 @@ fn transfer_return_code() { .account_id; // Contract has only the minimal balance so any transfer will fail. - Balances::make_free_balance_be(&addr, min_balance); + ::Currency::set_balance(&addr, min_balance); let result = Contracts::bare_call( ALICE, addr.clone(), @@ -1532,8 +1828,8 @@ fn call_return_code() { let (callee_code, _callee_hash) = compile_module::("ok_trap_revert").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); - let _ = Balances::deposit_creating(&CHARLIE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); let addr_bob = Contracts::bare_instantiate( ALICE, @@ -1549,7 +1845,7 @@ fn call_return_code() { .result .unwrap() .account_id; - Balances::make_free_balance_be(&addr_bob, min_balance); + ::Currency::set_balance(&addr_bob, min_balance); // Contract calls into Django which is no valid contract let result = Contracts::bare_call( @@ -1581,7 +1877,7 @@ fn call_return_code() 
{ .result .unwrap() .account_id; - Balances::make_free_balance_be(&addr_django, min_balance); + ::Currency::set_balance(&addr_django, min_balance); // Contract has only the minimal balance so any transfer will fail. let result = Contracts::bare_call( @@ -1604,7 +1900,7 @@ fn call_return_code() { assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but callee reverts because "1" is passed. - Balances::make_free_balance_be(&addr_bob, min_balance + 1000); + ::Currency::set_balance(&addr_bob, min_balance + 1000); let result = Contracts::bare_call( ALICE, addr_bob.clone(), @@ -1652,8 +1948,8 @@ fn instantiate_return_code() { let (callee_code, callee_hash) = compile_module::("ok_trap_revert").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); - let _ = Balances::deposit_creating(&CHARLIE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); let callee_hash = callee_hash.as_ref().to_vec(); assert_ok!(Contracts::instantiate_with_code( @@ -1682,7 +1978,7 @@ fn instantiate_return_code() { .account_id; // Contract has only the minimal balance so any transfer will fail. 
- Balances::make_free_balance_be(&addr, min_balance); + ::Currency::set_balance(&addr, min_balance); let result = Contracts::bare_call( ALICE, addr.clone(), @@ -1699,7 +1995,7 @@ fn instantiate_return_code() { assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but the passed code hash is invalid - Balances::make_free_balance_be(&addr, min_balance + 10_000); + ::Currency::set_balance(&addr, min_balance + 10_000); let result = Contracts::bare_call( ALICE, addr.clone(), @@ -1754,7 +2050,7 @@ fn disabled_chain_extension_wont_deploy() { let (code, _hash) = compile_module::("chain_extension").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); TestExtension::disable(); assert_err_ignore_postinfo!( Contracts::instantiate_with_code( @@ -1776,7 +2072,7 @@ fn disabled_chain_extension_errors_on_call() { let (code, _hash) = compile_module::("chain_extension").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let addr = Contracts::bare_instantiate( ALICE, min_balance * 100, @@ -1794,7 +2090,7 @@ fn disabled_chain_extension_errors_on_call() { TestExtension::disable(); assert_err_ignore_postinfo!( Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), - Error::::NoChainExtension, + Error::::CodeRejected, ); }); } @@ -1804,7 +2100,7 @@ fn chain_extension_works() { let (code, _hash) = compile_module::("chain_extension").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 
1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let addr = Contracts::bare_instantiate( ALICE, min_balance * 100, @@ -1860,7 +2156,7 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &[0] }.into(), + ExtensionInput { extension_id: 0, func_id: 2, extra: &0u32.encode() }.into(), DebugInfo::Skip, CollectEvents::Skip, Determinism::Enforced, @@ -1873,7 +2169,7 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &[42] }.into(), + ExtensionInput { extension_id: 0, func_id: 2, extra: &42u32.encode() }.into(), DebugInfo::Skip, CollectEvents::Skip, Determinism::Enforced, @@ -1886,7 +2182,7 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &[95] }.into(), + ExtensionInput { extension_id: 0, func_id: 2, extra: &95u32.encode() }.into(), DebugInfo::Skip, CollectEvents::Skip, Determinism::Enforced, @@ -1951,7 +2247,7 @@ fn chain_extension_temp_storage_works() { let (code, _hash) = compile_module::("chain_extension_temp_storage").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let addr = Contracts::bare_instantiate( ALICE, min_balance * 100, @@ -1998,7 +2294,7 @@ fn lazy_removal_works() { let (code, _hash) = compile_module::("self_destruct").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let addr = Contracts::bare_instantiate( ALICE, @@ -2050,7 +2346,7 @@ fn lazy_batch_removal_works() { let (code, _hash) = compile_module::("self_destruct").unwrap(); 
ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let mut tries: Vec = vec![]; for i in 0..3u8 { @@ -2118,7 +2414,7 @@ fn lazy_removal_partial_remove_works() { let trie = ext.execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let addr = Contracts::bare_instantiate( ALICE, @@ -2200,7 +2496,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { let (code, _hash) = compile_module::("self_destruct").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let addr = Contracts::bare_instantiate( ALICE, @@ -2272,7 +2568,7 @@ fn lazy_removal_does_not_use_all_weight() { let (trie, vals, weight_per_key) = ext.execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let addr = Contracts::bare_instantiate( ALICE, @@ -2360,7 +2656,7 @@ fn deletion_queue_ring_buffer_overflow() { ext.execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let mut tries: Vec = vec![]; // add 3 contracts to the deletion queue @@ -2419,7 +2715,7 @@ fn deletion_queue_ring_buffer_overflow() { fn refcounter() { let (wasm, code_hash) = compile_module::("self_destruct").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = 
Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let min_balance = ::Currency::minimum_balance(); // Create two contracts with the same code and check that they do in fact share it. @@ -2493,7 +2789,7 @@ fn refcounter() { assert_refcount!(code_hash, 1); // Pristine code should still be there - crate::PristineCode::::get(code_hash).unwrap(); + PristineCode::::get(code_hash).unwrap(); // remove the last contract assert_ok!(Contracts::call( @@ -2508,90 +2804,6 @@ fn refcounter() { // refcount is `0` but code should still exists because it needs to be removed manually assert!(crate::PristineCode::::contains_key(&code_hash)); - assert!(crate::CodeStorage::::contains_key(&code_hash)); - }); -} - -#[test] -fn reinstrument_does_charge() { - let (wasm, code_hash) = compile_module::("return_with_data").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let min_balance = ::Currency::minimum_balance(); - let zero = 0u32.to_le_bytes().encode(); - let code_len = wasm.len() as u32; - - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm), - zero.clone(), - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - - // Call the contract two times without reinstrument - - let result0 = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - zero.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); - assert!(!result0.result.unwrap().did_revert()); - - let result1 = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - zero.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); - assert!(!result1.result.unwrap().did_revert()); - - // They should match because both where called with the same schedule. 
- assert_eq!(result0.gas_consumed, result1.gas_consumed); - - // We cannot change the schedule. Instead, we decrease the version of the deployed - // contract below the current schedule's version. - crate::CodeStorage::mutate(&code_hash, |code: &mut Option>| { - code.as_mut().unwrap().decrement_version(); - }); - - // This call should trigger reinstrumentation - let result2 = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - zero.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); - assert!(!result2.result.unwrap().did_revert()); - assert!(result2.gas_consumed.ref_time() > result1.gas_consumed.ref_time()); - assert_eq!( - result2.gas_consumed.ref_time(), - result1.gas_consumed.ref_time() + - ::WeightInfo::reinstrument(code_len).ref_time(), - ); }); } @@ -2600,7 +2812,7 @@ fn debug_message_works() { let (wasm, _code_hash) = compile_module::("debug_message_works").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let addr = Contracts::bare_instantiate( ALICE, 30_000, @@ -2637,7 +2849,7 @@ fn debug_message_logging_disabled() { let (wasm, _code_hash) = compile_module::("debug_message_logging_disabled").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let addr = Contracts::bare_instantiate( ALICE, 30_000, @@ -2676,7 +2888,7 @@ fn debug_message_invalid_utf8() { let (wasm, _code_hash) = compile_module::("debug_message_invalid_utf8").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let addr = Contracts::bare_instantiate( ALICE, 30_000, @@ -2713,7 +2925,7 @@ fn 
gas_estimation_nested_call_fixed_limit() { let (callee_code, _callee_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let addr_caller = Contracts::bare_instantiate( ALICE, @@ -2785,7 +2997,7 @@ fn gas_estimation_nested_call_fixed_limit() { .result ); - // Make the same call using proof_size a but less than estimated. Should fail with OutOfGas. + // Make the same call using proof_size but less than estimated. Should fail with OutOfGas. let result = Contracts::bare_call( ALICE, addr_caller, @@ -2809,8 +3021,8 @@ fn gas_estimation_call_runtime() { let (callee_code, _callee_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); - let _ = Balances::deposit_creating(&CHARLIE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); let addr_caller = Contracts::bare_instantiate( ALICE, @@ -2827,7 +3039,7 @@ fn gas_estimation_call_runtime() { .unwrap() .account_id; - let addr_callee = Contracts::bare_instantiate( + Contracts::bare_instantiate( ALICE, min_balance * 100, GAS_LIMIT, @@ -2839,14 +3051,13 @@ fn gas_estimation_call_runtime() { CollectEvents::Skip, ) .result - .unwrap() - .account_id; + .unwrap(); // Call something trivial with a huge gas limit so that we can observe the effects // of pre-charging. This should create a difference between consumed and required. 
- let call = RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { - dest: addr_callee, - value: min_balance * 10, + let call = RuntimeCall::Dummy(pallet_dummy::Call::overestimate_pre_charge { + pre_charge: Weight::from_parts(10_000_000, 0), + actual_weight: Weight::from_parts(100, 0), }); let result = Contracts::bare_call( ALICE, @@ -2888,8 +3099,8 @@ fn call_runtime_reentrancy_guarded() { let (callee_code, _callee_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); - let _ = Balances::deposit_creating(&CHARLIE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); let addr_caller = Contracts::bare_instantiate( ALICE, @@ -2955,7 +3166,7 @@ fn ecdsa_recover() { let (wasm, _code_hash) = compile_module::("ecdsa_recover").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the ecdsa_recover contract. 
let addr = Contracts::bare_instantiate( @@ -3019,7 +3230,7 @@ fn bare_instantiate_returns_events() { let (wasm, _code_hash) = compile_module::("transfer_return_code").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let result = Contracts::bare_instantiate( ALICE, @@ -3044,7 +3255,7 @@ fn bare_instantiate_does_not_return_events() { let (wasm, _code_hash) = compile_module::("transfer_return_code").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let result = Contracts::bare_instantiate( ALICE, @@ -3069,7 +3280,7 @@ fn bare_call_returns_events() { let (wasm, _code_hash) = compile_module::("transfer_return_code").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let addr = Contracts::bare_instantiate( ALICE, @@ -3110,7 +3321,7 @@ fn bare_call_does_not_return_events() { let (wasm, _code_hash) = compile_module::("transfer_return_code").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = ::Currency::minimum_balance(); - let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let addr = Contracts::bare_instantiate( ALICE, @@ -3151,7 +3362,7 @@ fn sr25519_verify() { let (wasm, _code_hash) = compile_module::("sr25519_verify").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = 
Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the sr25519_verify contract. let addr = Contracts::bare_instantiate( @@ -3222,7 +3433,7 @@ fn failed_deposit_charge_should_roll_back_call() { let execute = || { ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate both contracts. let addr_caller = Contracts::bare_instantiate( @@ -3301,37 +3512,33 @@ fn upload_code_works() { let (wasm, code_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Drop previous events initialize_block(2); - assert!(!>::contains_key(code_hash)); + assert!(!PristineCode::::contains_key(&code_hash)); + assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), wasm, Some(codec::Compact(1_000)), Determinism::Enforced, )); - assert!(>::contains_key(code_hash)); + // Ensure the contract was stored and get expected deposit amount to be reserved. 
+ let deposit_expected = expected_deposit(ensure_stored(code_hash)); assert_eq!( System::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { - who: ALICE, - amount: 173, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![code_hash], - }, - ] + vec![EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::CodeStored { + code_hash, + deposit_held: deposit_expected, + uploader: ALICE + }), + topics: vec![code_hash], + },] ); }); } @@ -3339,9 +3546,11 @@ fn upload_code_works() { #[test] fn upload_code_limit_too_low() { let (wasm, _code_hash) = compile_module::("dummy").unwrap(); + let deposit_expected = expected_deposit(wasm.len()); + let deposit_insufficient = deposit_expected.saturating_sub(1); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Drop previous events initialize_block(2); @@ -3350,7 +3559,7 @@ fn upload_code_limit_too_low() { Contracts::upload_code( RuntimeOrigin::signed(ALICE), wasm, - Some(codec::Compact(100)), + Some(codec::Compact(deposit_insufficient)), Determinism::Enforced ), >::StorageDepositLimitExhausted, @@ -3363,9 +3572,11 @@ fn upload_code_limit_too_low() { #[test] fn upload_code_not_enough_balance() { let (wasm, _code_hash) = compile_module::("dummy").unwrap(); + let deposit_expected = expected_deposit(wasm.len()); + let deposit_insufficient = deposit_expected.saturating_sub(1); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 150); + let _ = ::Currency::set_balance(&ALICE, deposit_insufficient); // Drop previous events initialize_block(2); @@ -3389,7 +3600,7 @@ fn remove_code_works() { let (wasm, code_hash) = 
compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Drop previous events initialize_block(2); @@ -3400,38 +3611,29 @@ fn remove_code_works() { Some(codec::Compact(1_000)), Determinism::Enforced, )); + // Ensure the contract was stored and get expected deposit amount to be reserved. + let deposit_expected = expected_deposit(ensure_stored(code_hash)); - assert!(>::contains_key(code_hash)); assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash)); - assert!(!>::contains_key(code_hash)); - assert_eq!( System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { - who: ALICE, - amount: 173, + event: RuntimeEvent::Contracts(crate::Event::CodeStored { + code_hash, + deposit_held: deposit_expected, + uploader: ALICE }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), topics: vec![code_hash], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Unreserved { - who: ALICE, - amount: 173, + event: RuntimeEvent::Contracts(crate::Event::CodeRemoved { + code_hash, + deposit_released: deposit_expected, + remover: ALICE }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeRemoved { code_hash }), topics: vec![code_hash], }, ] @@ -3444,7 +3646,7 @@ fn remove_code_wrong_origin() { let (wasm, code_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Drop previous events initialize_block(2); @@ -3455,6 +3657,8 @@ fn 
remove_code_wrong_origin() { Some(codec::Compact(1_000)), Determinism::Enforced, )); + // Ensure the contract was stored and get expected deposit amount to be reserved. + let deposit_expected = expected_deposit(ensure_stored(code_hash)); assert_noop!( Contracts::remove_code(RuntimeOrigin::signed(BOB), code_hash), @@ -3463,21 +3667,15 @@ fn remove_code_wrong_origin() { assert_eq!( System::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { - who: ALICE, - amount: 173, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![code_hash], - }, - ] + vec![EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::CodeStored { + code_hash, + deposit_held: deposit_expected, + uploader: ALICE + }), + topics: vec![code_hash], + },] ); }); } @@ -3487,7 +3685,7 @@ fn remove_code_in_use() { let (wasm, code_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( RuntimeOrigin::signed(ALICE), @@ -3516,7 +3714,7 @@ fn remove_code_not_found() { let (_wasm, code_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Drop previous events initialize_block(2); @@ -3534,7 +3732,7 @@ fn remove_code_not_found() { fn instantiate_with_zero_balance_works() { let (wasm, code_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 
1_000_000); let min_balance = ::Currency::minimum_balance(); // Drop previous events @@ -3559,6 +3757,8 @@ fn instantiate_with_zero_balance_works() { // Check that the BOB contract has been instantiated. let contract = get_contract(&addr); let deposit_account = contract.deposit_account().deref(); + // Ensure the contract was stored and get expected deposit amount to be reserved. + let deposit_expected = expected_deposit(ensure_stored(code_hash)); // Make sure the account exists even though no free balance was send assert_eq!(::Currency::free_balance(&addr), min_balance); @@ -3567,6 +3767,15 @@ fn instantiate_with_zero_balance_works() { assert_eq!( System::events(), vec![ + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::CodeStored { + code_hash, + deposit_held: deposit_expected, + uploader: ALICE + }), + topics: vec![code_hash], + }, EventRecord { phase: Phase::Initialization, event: RuntimeEvent::System(frame_system::Event::NewAccount { @@ -3615,19 +3824,6 @@ fn instantiate_with_zero_balance_works() { }), topics: vec![], }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { - who: ALICE, - amount: 173, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![code_hash], - }, EventRecord { phase: Phase::Initialization, event: RuntimeEvent::Contracts(crate::Event::Instantiated { @@ -3645,7 +3841,7 @@ fn instantiate_with_zero_balance_works() { fn instantiate_with_below_existential_deposit_works() { let (wasm, code_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let min_balance = ::Currency::minimum_balance(); // Drop previous events @@ -3670,7 +3866,8 @@ fn 
instantiate_with_below_existential_deposit_works() { // Check that the BOB contract has been instantiated. let contract = get_contract(&addr); let deposit_account = contract.deposit_account().deref(); - + // Ensure the contract was stored and get expected deposit amount to be reserved. + let deposit_expected = expected_deposit(ensure_stored(code_hash)); // Make sure the account exists even though not enough free balance was send assert_eq!(::Currency::free_balance(&addr), min_balance + 50); assert_eq!(::Currency::total_balance(&addr), min_balance + 50); @@ -3678,6 +3875,15 @@ fn instantiate_with_below_existential_deposit_works() { assert_eq!( System::events(), vec![ + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::CodeStored { + code_hash, + deposit_held: deposit_expected, + uploader: ALICE + }), + topics: vec![code_hash], + }, EventRecord { phase: Phase::Initialization, event: RuntimeEvent::System(frame_system::Event::NewAccount { @@ -3735,19 +3941,6 @@ fn instantiate_with_below_existential_deposit_works() { }), topics: vec![], }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { - who: ALICE, - amount: 173, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![code_hash], - }, EventRecord { phase: Phase::Initialization, event: RuntimeEvent::Contracts(crate::Event::Instantiated { @@ -3765,7 +3958,7 @@ fn instantiate_with_below_existential_deposit_works() { fn storage_deposit_works() { let (wasm, _code_hash) = compile_module::("multi_store").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let mut deposit = ::Currency::minimum_balance(); let addr = Contracts::bare_instantiate( @@ -3904,7 +4097,7 @@ fn 
storage_deposit_callee_works() { let (wasm_callee, _code_hash_callee) = compile_module::("store_call").unwrap(); const ED: u64 = 200; ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Create both contracts: Constructors do nothing. let addr_caller = Contracts::bare_instantiate( @@ -3961,7 +4154,7 @@ fn set_code_extrinsic() { assert_ne!(code_hash, new_code_hash); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let addr = Contracts::bare_instantiate( ALICE, @@ -4045,12 +4238,15 @@ fn set_code_extrinsic() { #[test] fn slash_cannot_kill_account() { let (wasm, _code_hash) = compile_module::("dummy").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + const ED: u64 = 200; + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let value = 700; + let balance_held = 500; + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let addr = Contracts::bare_instantiate( ALICE, - 700, + value, GAS_LIMIT, None, Code::Upload(wasm), @@ -4066,25 +4262,28 @@ fn slash_cannot_kill_account() { // Drop previous events initialize_block(2); - // Try to destroy the account of the contract by slashing. + // We need to hold some balances in order to have something to slash. As slashing can only + // affect balances held under certain HoldReason. + ::Currency::hold( + &HoldReason::CodeUploadDepositReserve.into(), + &addr, + balance_held, + ) + .unwrap(); + + assert_eq!(::Currency::total_balance_on_hold(&addr), balance_held); + + // Try to destroy the account of the contract by slashing the total balance. // The account does not get destroyed because of the consumer reference. 
// Slashing can for example happen if the contract takes part in staking. let _ = ::Currency::slash( + &HoldReason::CodeUploadDepositReserve.into(), &addr, ::Currency::total_balance(&addr), ); - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Slashed { - who: addr.clone(), - amount: 700, // slash didn't remove the minimum balance - }), - topics: vec![], - },] - ); + // Slashing only removed the balance held. + assert_eq!(::Currency::total_balance(&addr), value + ED - balance_held,); }); } @@ -4093,7 +4292,7 @@ fn contract_reverted() { let (wasm, code_hash) = compile_module::("return_with_data").unwrap(); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let flags = ReturnFlags::REVERT; let buffer = [4u8, 8, 15, 16, 23, 42]; let input = (flags.bits(), buffer).encode(); @@ -4206,7 +4405,7 @@ fn contract_reverted() { #[test] fn code_rejected_error_works() { ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let (wasm, _) = compile_module::("invalid_module").unwrap(); assert_noop!( @@ -4232,10 +4431,10 @@ fn code_rejected_error_works() { assert_err!(result.result, >::CodeRejected); assert_eq!( std::str::from_utf8(&result.debug_message).unwrap(), - "validation of new code failed" + "Can't load the module into wasmi!" 
); - let (wasm, _) = compile_module::("invalid_contract").unwrap(); + let (wasm, _) = compile_module::("invalid_contract_no_call").unwrap(); assert_noop!( Contracts::upload_code( RuntimeOrigin::signed(ALICE), @@ -4262,6 +4461,34 @@ fn code_rejected_error_works() { std::str::from_utf8(&result.debug_message).unwrap(), "call function isn't exported" ); + + let (wasm, _) = compile_module::("invalid_contract_no_memory").unwrap(); + assert_noop!( + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm.clone(), + None, + Determinism::Enforced + ), + >::CodeRejected, + ); + + let result = Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(wasm), + vec![], + vec![], + DebugInfo::UnsafeDebug, + CollectEvents::Skip, + ); + assert_err!(result.result, >::CodeRejected); + assert_eq!( + std::str::from_utf8(&result.debug_message).unwrap(), + "No memory import found in the module" + ); }); } @@ -4271,7 +4498,7 @@ fn set_code_hash() { let (new_wasm, new_code_hash) = compile_module::("new_set_code_hash_contract").unwrap(); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the 'caller' let contract_addr = Contracts::bare_instantiate( @@ -4372,11 +4599,29 @@ fn set_code_hash() { #[test] fn storage_deposit_limit_is_enforced() { + let ed = 200; let (wasm, _code_hash) = compile_module::("store_call").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + ExtBuilder::default().existential_deposit(ed).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let min_balance = ::Currency::minimum_balance(); + // Setting insufficient storage_deposit should fail. 
+ assert_err!( + Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + Some((2 * ed + 3 - 1).into()), // expected deposit is 2 * ed + 3 for the call + Code::Upload(wasm.clone()), + vec![], + vec![], + DebugInfo::Skip, + CollectEvents::Skip, + ) + .result, + >::StorageDepositLimitExhausted, + ); + // Instantiate the BOB contract. let addr = Contracts::bare_instantiate( ALICE, @@ -4450,7 +4695,7 @@ fn deposit_limit_in_nested_calls() { compile_module::("create_storage_and_call").unwrap(); let (wasm_callee, _code_hash_callee) = compile_module::("store_call").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Create both contracts: Constructors do nothing. let addr_caller = Contracts::bare_instantiate( @@ -4559,7 +4804,7 @@ fn deposit_limit_in_nested_calls() { >::StorageDepositLimitExhausted, ); - let _ = Balances::make_free_balance_be(&ALICE, 1_000); + let _ = ::Currency::set_balance(&ALICE, 1_000); // Require more than the sender's balance. // We don't set a special limit for the nested call. 
@@ -4596,8 +4841,8 @@ fn deposit_limit_in_nested_instantiate() { let (wasm_callee, code_hash_callee) = compile_module::("store_deploy").unwrap(); const ED: u64 = 5; ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let _ = Balances::deposit_creating(&BOB, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&BOB, 1_000_000); // Create caller contract let addr_caller = Contracts::bare_instantiate( ALICE, @@ -4746,9 +4991,11 @@ fn deposit_limit_in_nested_instantiate() { #[test] fn deposit_limit_honors_liquidity_restrictions() { let (wasm, _code_hash) = compile_module::("store_call").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let _ = Balances::deposit_creating(&BOB, 1_000); + const ED: u64 = 200; + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let bobs_balance = 1_000; + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&BOB, bobs_balance); let min_balance = ::Currency::minimum_balance(); // Instantiate the BOB contract. 
@@ -4771,8 +5018,13 @@ fn deposit_limit_honors_liquidity_restrictions() { assert_eq!(get_contract(&addr).total_deposit(), min_balance); assert_eq!(::Currency::total_balance(&addr), min_balance); - // check that the lock ins honored - Balances::set_lock([0; 8], &BOB, 1_000, WithdrawReasons::TRANSFER); + // check that the hold is honored + ::Currency::hold( + &HoldReason::CodeUploadDepositReserve.into(), + &BOB, + bobs_balance - ED, + ) + .unwrap(); assert_err_ignore_postinfo!( Contracts::call( RuntimeOrigin::signed(BOB), @@ -4784,7 +5036,7 @@ fn deposit_limit_honors_liquidity_restrictions() { ), >::StorageDepositNotEnoughFunds, ); - assert_eq!(Balances::free_balance(&BOB), 1_000); + assert_eq!(::Currency::free_balance(&BOB), ED); }); } @@ -4792,8 +5044,8 @@ fn deposit_limit_honors_liquidity_restrictions() { fn deposit_limit_honors_existential_deposit() { let (wasm, _code_hash) = compile_module::("store_call").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let _ = Balances::deposit_creating(&BOB, 1_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&BOB, 1_000); let min_balance = ::Currency::minimum_balance(); // Instantiate the BOB contract. 
@@ -4828,7 +5080,7 @@ fn deposit_limit_honors_existential_deposit() { ), >::StorageDepositNotEnoughFunds, ); - assert_eq!(Balances::free_balance(&BOB), 1_000); + assert_eq!(::Currency::free_balance(&BOB), 1_000); }); } @@ -4836,8 +5088,8 @@ fn deposit_limit_honors_existential_deposit() { fn deposit_limit_honors_min_leftover() { let (wasm, _code_hash) = compile_module::("store_call").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let _ = Balances::deposit_creating(&BOB, 1_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&BOB, 1_000); let min_balance = ::Currency::minimum_balance(); // Instantiate the BOB contract. @@ -4872,7 +5124,7 @@ fn deposit_limit_honors_min_leftover() { ), >::StorageDepositNotEnoughFunds, ); - assert_eq!(Balances::free_balance(&BOB), 1_000); + assert_eq!(::Currency::free_balance(&BOB), 1_000); }); } @@ -4881,7 +5133,7 @@ fn cannot_instantiate_indeterministic_code() { let (wasm, code_hash) = compile_module::("float_instruction").unwrap(); let (caller_wasm, _) = compile_module::("instantiate_return_code").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Try to instantiate directly from code assert_err_ignore_postinfo!( @@ -4930,6 +5182,7 @@ fn cannot_instantiate_indeterministic_code() { None, Determinism::Relaxed, )); + assert_err_ignore_postinfo!( Contracts::instantiate( RuntimeOrigin::signed(ALICE), @@ -5015,7 +5268,7 @@ fn cannot_set_code_indeterministic_code() { let (wasm, code_hash) = compile_module::("float_instruction").unwrap(); let (caller_wasm, _) = compile_module::("set_code_hash").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = 
::Currency::set_balance(&ALICE, 1_000_000); // Put the non deterministic contract on-chain assert_ok!(Contracts::upload_code( @@ -5065,7 +5318,7 @@ fn delegate_call_indeterministic_code() { let (wasm, code_hash) = compile_module::("float_instruction").unwrap(); let (caller_wasm, _) = compile_module::("delegate_call_simple").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Put the non deterministic contract on-chain assert_ok!(Contracts::upload_code( @@ -5126,12 +5379,247 @@ fn delegate_call_indeterministic_code() { }); } +#[test] +fn add_remove_delegate_dependency_works() { + // set hash lock up deposit to 30%, to test deposit calculation. + CODE_HASH_LOCKUP_DEPOSIT_PERCENT.with(|c| *c.borrow_mut() = Perbill::from_percent(30)); + MAX_DELEGATE_DEPENDENCIES.with(|c| *c.borrow_mut() = 1); + + let (wasm_caller, self_code_hash) = + compile_module::("add_remove_delegate_dependency").unwrap(); + let (wasm_callee, code_hash) = compile_module::("dummy").unwrap(); + let (wasm_other, other_code_hash) = compile_module::("call").unwrap(); + + // Define inputs with various actions to test adding / removing delegate_dependencies. + // See the contract for more details. + let noop_input = (0u32, code_hash); + let add_delegate_dependency_input = (1u32, code_hash); + let remove_delegate_dependency_input = (2u32, code_hash); + let terminate_input = (3u32, code_hash); + + // Instantiate the caller contract with the given input. + let instantiate = |input: &(u32, H256)| { + Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(wasm_caller.clone()), + input.encode(), + vec![], + DebugInfo::Skip, + CollectEvents::Skip, + ) + }; + + // Call contract with the given input. 
+ let call = |addr_caller: &AccountId32, input: &(u32, H256)| { + >::bare_call( + ALICE, + addr_caller.clone(), + 0, + GAS_LIMIT, + None, + input.encode(), + DebugInfo::UnsafeDebug, + CollectEvents::Skip, + Determinism::Enforced, + ) + }; + + const ED: u64 = 2000; + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); + + // Instantiate with add_delegate_dependency should fail since the code is not yet on chain. + assert_err!( + instantiate(&add_delegate_dependency_input).result, + Error::::CodeNotFound + ); + + // Upload the delegated code. + let CodeUploadReturnValue { deposit, .. } = + Contracts::bare_upload_code(ALICE, wasm_callee.clone(), None, Determinism::Enforced) + .unwrap(); + + // Instantiate should now work. + let addr_caller = instantiate(&add_delegate_dependency_input).result.unwrap().account_id; + + // There should be a dependency and a deposit. + let contract = test_utils::get_contract(&addr_caller); + + let dependency_deposit = &CodeHashLockupDepositPercent::get().mul_ceil(deposit); + assert_eq!(contract.delegate_dependencies().get(&code_hash), Some(dependency_deposit)); + assert_eq!(test_utils::get_balance(contract.deposit_account()), ED + dependency_deposit); + + // Removing the code should fail, since we have added a dependency. + assert_err!( + Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash), + >::CodeInUse + ); + + // Adding an already existing dependency should fail. + assert_err!( + call(&addr_caller, &add_delegate_dependency_input).result, + Error::::DelegateDependencyAlreadyExists + ); + + // Adding a dependency to self should fail. + assert_err!( + call(&addr_caller, &(1u32, self_code_hash)).result, + Error::::CannotAddSelfAsDelegateDependency + ); + + // Adding more than the maximum allowed delegate_dependencies should fail. 
+ Contracts::bare_upload_code(ALICE, wasm_other, None, Determinism::Enforced).unwrap(); + assert_err!( + call(&addr_caller, &(1u32, other_code_hash)).result, + Error::::MaxDelegateDependenciesReached + ); + + // Removing dependency should work. + assert_ok!(call(&addr_caller, &remove_delegate_dependency_input).result); + + // Dependency should be removed, and deposit should be returned. + let contract = test_utils::get_contract(&addr_caller); + assert!(contract.delegate_dependencies().is_empty()); + assert_eq!(test_utils::get_balance(contract.deposit_account()), ED); + + // Removing an unexisting dependency should fail. + assert_err!( + call(&addr_caller, &remove_delegate_dependency_input).result, + Error::::DelegateDependencyNotFound + ); + + // Adding a dependency with a storage limit too low should fail. + DEFAULT_DEPOSIT_LIMIT.with(|c| *c.borrow_mut() = dependency_deposit - 1); + assert_err!( + call(&addr_caller, &add_delegate_dependency_input).result, + Error::::StorageDepositLimitExhausted + ); + + // Since we removed the dependency we should now be able to remove the code. + assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash)); + + // Calling should fail since the delegated contract is not on chain anymore. + assert_err!(call(&addr_caller, &noop_input).result, Error::::ContractTrapped); + + // Restore initial deposit limit and add the dependency back. + DEFAULT_DEPOSIT_LIMIT.with(|c| *c.borrow_mut() = 10_000_000); + Contracts::bare_upload_code(ALICE, wasm_callee, None, Determinism::Enforced).unwrap(); + call(&addr_caller, &add_delegate_dependency_input).result.unwrap(); + + // Call terminate should work, and return the deposit. + let balance_before = test_utils::get_balance(&ALICE); + assert_ok!(call(&addr_caller, &terminate_input).result); + assert_eq!(test_utils::get_balance(&ALICE), balance_before + 2 * ED + dependency_deposit); + + // Terminate should also remove the dependency, so we can remove the code. 
+ assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash)); + }); +} + +#[test] +fn native_dependency_deposit_works() { + let (wasm, code_hash) = compile_module::("set_code_hash").unwrap(); + let (dummy_wasm, dummy_code_hash) = compile_module::("dummy").unwrap(); + + // Set hash lock up deposit to 30%, to test deposit calculation. + CODE_HASH_LOCKUP_DEPOSIT_PERCENT.with(|c| *c.borrow_mut() = Perbill::from_percent(30)); + + // Set a low existential deposit so that the base storage deposit is based on the contract + // storage deposit rather than the existential deposit. + const ED: u64 = 10; + + // Test with both existing and uploaded code + for code in [Code::Upload(wasm.clone()), Code::Existing(code_hash)] { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); + let lockup_deposit_percent = CodeHashLockupDepositPercent::get(); + let per_byte = DepositPerByte::get(); + let per_item = DepositPerItem::get(); + + // Upload the dummy contract, + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + dummy_wasm.clone(), + None, + Determinism::Enforced, + ) + .unwrap(); + + // Upload `set_code_hash` contracts if using Code::Existing. + let add_upload_deposit = match code { + Code::Existing(_) => { + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm.clone(), + None, + Determinism::Enforced, + ) + .unwrap(); + false + }, + Code::Upload(_) => true, + }; + + // Instantiate the set_code_hash contract. 
+ let res = Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + code, + vec![], + vec![], + DebugInfo::Skip, + CollectEvents::Skip, + ); + + let addr = res.result.unwrap().account_id; + let info = ContractInfoOf::::get(&addr).unwrap(); + let info_len = info.encoded_size() as u64; + let base_deposit = ED + per_byte * info_len + per_item * 1; + let upload_deposit = test_utils::get_code_deposit(&code_hash); + let extra_deposit = add_upload_deposit.then(|| upload_deposit).unwrap_or_default(); + + // Check initial storage_deposit + // The base deposit should be: ED + info_len * per_byte + 1 * per_item + 30% * deposit + let deposit = + extra_deposit + base_deposit + lockup_deposit_percent.mul_ceil(upload_deposit); + + assert_eq!(res.storage_deposit.charge_or_zero(), deposit); + + // call set_code_hash + >::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + dummy_code_hash.encode(), + DebugInfo::Skip, + CollectEvents::Skip, + Determinism::Enforced, + ) + .result + .unwrap(); + + // Check updated storage_deposit + let code_deposit = test_utils::get_code_deposit(&dummy_code_hash); + let deposit = base_deposit + lockup_deposit_percent.mul_ceil(code_deposit); + assert_eq!(test_utils::get_contract(&addr).storage_base_deposit(), deposit); + assert_eq!(test_utils::get_balance(&info.deposit_account()), deposit - ED); + }); + } +} + #[test] fn reentrance_count_works_with_call() { let (wasm, _code_hash) = compile_module::("reentrance_count_call").unwrap(); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let contract_addr = Contracts::bare_instantiate( ALICE, @@ -5172,7 +5660,7 @@ fn reentrance_count_works_with_delegated_call() { let (wasm, code_hash) = compile_module::("reentrance_count_delegated_call").unwrap(); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = 
Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let contract_addr = Contracts::bare_instantiate( ALICE, @@ -5215,7 +5703,7 @@ fn account_reentrance_count_works() { compile_module::("reentrance_count_call").unwrap(); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let contract_addr = Contracts::bare_instantiate( ALICE, @@ -5331,7 +5819,7 @@ fn root_can_call() { let (wasm, _) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); let addr = Contracts::bare_instantiate( ALICE, @@ -5375,7 +5863,7 @@ fn root_cannot_instantiate_with_code() { vec![], vec![], ), - DispatchError::RootNotAllowed, + DispatchError::BadOrigin ); }); } diff --git a/frame/contracts/src/tests/pallet_dummy.rs b/frame/contracts/src/tests/pallet_dummy.rs new file mode 100644 index 0000000000000..7f8db53bf463f --- /dev/null +++ b/frame/contracts/src/tests/pallet_dummy.rs @@ -0,0 +1,36 @@ +pub use pallet::*; + +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use frame_support::{ + dispatch::{Pays, PostDispatchInfo}, + ensure, + pallet_prelude::DispatchResultWithPostInfo, + weights::Weight, + }; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::call] + impl Pallet { + /// Dummy function that overcharges the predispatch weight, allowing us to test the correct + /// values of [`ContractResult::gas_consumed`] and [`ContractResult::gas_required`] in + /// tests. 
+ #[pallet::call_index(1)] + #[pallet::weight(*pre_charge)] + pub fn overestimate_pre_charge( + origin: OriginFor, + pre_charge: Weight, + actual_weight: Weight, + ) -> DispatchResultWithPostInfo { + ensure_signed(origin)?; + ensure!(pre_charge.any_gt(actual_weight), "pre_charge must be > actual_weight"); + Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) + } + } +} diff --git a/frame/contracts/src/tests/unsafe_debug.rs b/frame/contracts/src/tests/unsafe_debug.rs new file mode 100644 index 0000000000000..160a6ed6dc54f --- /dev/null +++ b/frame/contracts/src/tests/unsafe_debug.rs @@ -0,0 +1,138 @@ +#![cfg(feature = "unsafe-debug")] + +use super::*; +use crate::unsafe_debug::{ExecutionObserver, ExportedFunction}; +use frame_support::traits::Currency; +use pallet_contracts_primitives::ExecReturnValue; +use pretty_assertions::assert_eq; +use std::cell::RefCell; + +#[derive(Clone, PartialEq, Eq, Debug)] +struct DebugFrame { + code_hash: CodeHash, + call: ExportedFunction, + input: Vec, + result: Option>, +} + +thread_local! 
{ + static DEBUG_EXECUTION_TRACE: RefCell> = RefCell::new(Vec::new()); +} + +pub struct TestDebugger; + +impl ExecutionObserver> for TestDebugger { + fn before_call(code_hash: &CodeHash, entry_point: ExportedFunction, input_data: &[u8]) { + DEBUG_EXECUTION_TRACE.with(|d| { + d.borrow_mut().push(DebugFrame { + code_hash: code_hash.clone(), + call: entry_point, + input: input_data.to_vec(), + result: None, + }) + }); + } + + fn after_call( + code_hash: &CodeHash, + entry_point: ExportedFunction, + input_data: Vec, + output: &ExecReturnValue, + ) { + DEBUG_EXECUTION_TRACE.with(|d| { + d.borrow_mut().push(DebugFrame { + code_hash: code_hash.clone(), + call: entry_point, + input: input_data, + result: Some(output.data.clone()), + }) + }); + } +} + +#[test] +fn unsafe_debugging_works() { + let (wasm_caller, code_hash_caller) = compile_module::("call").unwrap(); + let (wasm_callee, code_hash_callee) = compile_module::("store_call").unwrap(); + + fn current_stack() -> Vec { + DEBUG_EXECUTION_TRACE.with(|stack| stack.borrow().clone()) + } + + fn deploy(wasm: Vec) -> AccountId32 { + Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(wasm), + vec![], + vec![], + DebugInfo::Skip, + CollectEvents::Skip, + ) + .result + .unwrap() + .account_id + } + + fn constructor_frame(hash: CodeHash, after: bool) -> DebugFrame { + DebugFrame { + code_hash: hash, + call: ExportedFunction::Constructor, + input: vec![], + result: if after { Some(vec![]) } else { None }, + } + } + + fn call_frame(hash: CodeHash, args: Vec, after: bool) -> DebugFrame { + DebugFrame { + code_hash: hash, + call: ExportedFunction::Call, + input: args, + result: if after { Some(vec![]) } else { None }, + } + } + + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + + assert_eq!(current_stack(), vec![]); + + let addr_caller = deploy(wasm_caller); + let addr_callee = deploy(wasm_callee); + + assert_eq!( + 
current_stack(), + vec![ + constructor_frame(code_hash_caller, false), + constructor_frame(code_hash_caller, true), + constructor_frame(code_hash_callee, false), + constructor_frame(code_hash_callee, true), + ] + ); + + let main_args = (100u32, &addr_callee).encode(); + let inner_args = (100u32).encode(); + + assert_ok!(Contracts::call( + RuntimeOrigin::signed(ALICE), + addr_caller, + 0, + GAS_LIMIT, + None, + main_args.clone() + )); + + let stack_top = current_stack()[4..].to_vec(); + assert_eq!( + stack_top, + vec![ + call_frame(code_hash_caller, main_args.clone(), false), + call_frame(code_hash_callee, inner_args.clone(), false), + call_frame(code_hash_callee, inner_args, true), + call_frame(code_hash_caller, main_args, true), + ] + ); + }); +} diff --git a/frame/contracts/src/unsafe_debug.rs b/frame/contracts/src/unsafe_debug.rs new file mode 100644 index 0000000000000..418af5e605d28 --- /dev/null +++ b/frame/contracts/src/unsafe_debug.rs @@ -0,0 +1,47 @@ +#![cfg(feature = "unsafe-debug")] + +pub use crate::exec::ExportedFunction; +use crate::{CodeHash, Vec}; +use pallet_contracts_primitives::ExecReturnValue; + +/// Umbrella trait for all interfaces that serves for debugging, but are not suitable for any +/// production or benchmarking use. +pub trait UnsafeDebug: ExecutionObserver> {} + +impl UnsafeDebug for D where D: ExecutionObserver> {} + +/// Defines the interface between pallet contracts and the outside observer. +/// +/// The intended use is the environment, where the observer holds directly the whole runtime +/// (externalities) and thus can react to the execution breakpoints synchronously. +/// +/// This definitely *should not* be used in any production or benchmarking setting, since handling +/// callbacks might be arbitrarily expensive and thus significantly influence performance. +pub trait ExecutionObserver { + /// Called just before the execution of a contract. 
+ /// + /// # Arguments + /// + /// * `code_hash` - The code hash of the contract being called. + /// * `entry_point` - Describes whether the call is the constructor or a regular call. + /// * `input_data` - The raw input data of the call. + fn before_call(_code_hash: &CodeHash, _entry_point: ExportedFunction, _input_data: &[u8]) {} + + /// Called just after the execution of a contract. + /// + /// # Arguments + /// + /// * `code_hash` - The code hash of the contract being called. + /// * `entry_point` - Describes whether the call was the constructor or a regular call. + /// * `input_data` - The raw input data of the call. + /// * `output` - The raw output of the call. + fn after_call( + _code_hash: &CodeHash, + _entry_point: ExportedFunction, + _input_data: Vec, + _output: &ExecReturnValue, + ) { + } +} + +impl ExecutionObserver for () {} diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs deleted file mode 100644 index 7dbce367ca96b..0000000000000 --- a/frame/contracts/src/wasm/code_cache.rs +++ /dev/null @@ -1,239 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A module that implements instrumented code cache. -//! -//! - In order to run contract code we need to instrument it with gas metering. -//! 
To do that we need to provide the schedule which will supply exact gas costs values. -//! We cache this code in the storage saving the schedule version. -//! - Before running contract code we check if the cached code has the schedule version that -//! is equal to the current saved schedule. -//! If it is equal then run the code, if it isn't reinstrument with the current schedule. -//! - When we update the schedule we want it to have strictly greater version than the current saved -//! one: -//! this guarantees that every instrumented contract code in cache cannot have the version equal to -//! the current one. Thus, before executing a contract it should be reinstrument with new schedule. - -use crate::{ - gas::{GasMeter, Token}, - wasm::{prepare, PrefabWasmModule}, - weights::WeightInfo, - CodeHash, CodeStorage, Config, Error, Event, OwnerInfoOf, Pallet, PristineCode, Schedule, - Weight, -}; -use frame_support::{ - dispatch::{DispatchError, DispatchResult}, - ensure, - traits::{Get, ReservableCurrency}, - WeakBoundedVec, -}; -use sp_runtime::traits::BadOrigin; -use sp_std::vec; - -/// Put the instrumented module in storage. -/// -/// Increments the refcount of the in-storage `prefab_module` if it already exists in storage -/// under the specified `code_hash`. -pub fn store(mut module: PrefabWasmModule, instantiated: bool) -> DispatchResult { - let code_hash = sp_std::mem::take(&mut module.code_hash); - >::mutate(&code_hash, |owner_info| { - match owner_info { - // Instantiate existing contract. - // - // No need to update the `CodeStorage` as any re-instrumentation eagerly saves - // the re-instrumented code. - Some(owner_info) if instantiated => { - owner_info.refcount = owner_info.refcount.checked_add(1).expect( - " - refcount is 64bit. Generating this overflow would require to store - _at least_ 18 exabyte of data assuming that a contract consumes only - one byte of data. Any node would run out of storage space before hitting - this overflow. 
- qed - ", - ); - Ok(()) - }, - // Re-upload existing contract without executing it. - // - // We are careful here to just overwrite the code to not include it into the PoV. - // We do this because the uploaded code was instrumented with the latest schedule - // and hence we persist those changes. Otherwise the next execution will pay again - // for the instrumentation. - Some(_) => { - >::insert(&code_hash, module); - Ok(()) - }, - // Upload a new contract. - // - // We need to write all data structures and collect the deposit. - None => { - let orig_code = module.original_code.take().expect( - " - If an executable isn't in storage it was uploaded. - If it was uploaded the original code must exist. qed - ", - ); - let mut new_owner_info = module.owner_info.take().expect( - "If an executable isn't in storage it was uploaded. - If it was uploaded the owner info was generated and attached. qed - ", - ); - // This `None` case happens only in freshly uploaded modules. This means that - // the `owner` is always the origin of the current transaction. - T::Currency::reserve(&new_owner_info.owner, new_owner_info.deposit) - .map_err(|_| >::StorageDepositNotEnoughFunds)?; - new_owner_info.refcount = if instantiated { 1 } else { 0 }; - >::insert(&code_hash, orig_code); - >::insert(&code_hash, module); - *owner_info = Some(new_owner_info); - >::deposit_event(vec![code_hash], Event::CodeStored { code_hash }); - Ok(()) - }, - } - }) -} - -/// Decrement the refcount of a code in-storage by one. -/// -/// # Note -/// -/// A contract whose refcount dropped to zero isn't automatically removed. A `remove_code` -/// transaction must be submitted by the original uploader to do so. -pub fn decrement_refcount(code_hash: CodeHash) { - >::mutate(code_hash, |existing| { - if let Some(info) = existing { - info.refcount = info.refcount.saturating_sub(1); - } - }); -} - -/// Increment the refcount of a code in-storage by one. 
-/// -/// # Errors -/// -/// [`Error::CodeNotFound`] is returned if the specified `code_hash` does not exist. -pub fn increment_refcount(code_hash: CodeHash) -> Result<(), DispatchError> { - >::mutate(code_hash, |existing| -> Result<(), DispatchError> { - if let Some(info) = existing { - info.refcount = info.refcount.saturating_add(1); - Ok(()) - } else { - Err(Error::::CodeNotFound.into()) - } - }) -} - -/// Try to remove code together with all associated information. -pub fn try_remove(origin: &T::AccountId, code_hash: CodeHash) -> DispatchResult { - >::try_mutate_exists(&code_hash, |existing| { - if let Some(owner_info) = existing { - ensure!(owner_info.refcount == 0, >::CodeInUse); - ensure!(&owner_info.owner == origin, BadOrigin); - T::Currency::unreserve(&owner_info.owner, owner_info.deposit); - *existing = None; - >::remove(&code_hash); - >::remove(&code_hash); - >::deposit_event(vec![code_hash], Event::CodeRemoved { code_hash }); - Ok(()) - } else { - Err(>::CodeNotFound.into()) - } - }) -} - -/// Load code with the given code hash. -/// -/// If the module was instrumented with a lower version of schedule than -/// the current one given as an argument, then this function will perform -/// re-instrumentation and update the cache in the storage. -pub fn load( - code_hash: CodeHash, - schedule: &Schedule, - gas_meter: &mut GasMeter, -) -> Result, DispatchError> { - let max_code_len = T::MaxCodeLen::get(); - let charged = gas_meter.charge(CodeToken::Load(max_code_len))?; - - let mut prefab_module = >::get(code_hash).ok_or(Error::::CodeNotFound)?; - let instrumented_code_len = prefab_module.code.len() as u32; - gas_meter.adjust_gas(charged, CodeToken::Load(instrumented_code_len)); - prefab_module.code_hash = code_hash; - - if prefab_module.instruction_weights_version < schedule.instruction_weights.version { - // The instruction weights have changed. - // We need to re-instrument the code with the new instruction weights. 
- let charged = gas_meter.charge(CodeToken::Reinstrument(instrumented_code_len))?; - let orig_code_len = reinstrument(&mut prefab_module, schedule)?; - gas_meter.adjust_gas(charged, CodeToken::Reinstrument(orig_code_len)); - } - - Ok(prefab_module) -} - -/// Instruments the passed prefab wasm module with the supplied schedule. -/// -/// Returns the size in bytes of the uninstrumented code. -pub fn reinstrument( - prefab_module: &mut PrefabWasmModule, - schedule: &Schedule, -) -> Result { - let original_code = - >::get(&prefab_module.code_hash).ok_or(Error::::CodeNotFound)?; - let original_code_len = original_code.len(); - // We need to allow contracts growing too big after re-instrumentation. Otherwise - // the contract can become inaccessible. The user has no influence over this size - // as the contract is already deployed and every change in size would be the result - // of changes in the instrumentation algorithm controlled by the chain authors. - prefab_module.code = WeakBoundedVec::force_from( - prepare::reinstrument::( - &original_code, - schedule, - prefab_module.determinism, - )?, - Some("Contract exceeds size limit after re-instrumentation."), - ); - prefab_module.instruction_weights_version = schedule.instruction_weights.version; - >::insert(&prefab_module.code_hash, &*prefab_module); - Ok(original_code_len as u32) -} - -/// Costs for operations that are related to code handling. -#[cfg_attr(test, derive(Debug, PartialEq, Eq))] -#[derive(Clone, Copy)] -enum CodeToken { - /// Weight for reinstrumenting a contract contract of the supplied size in bytes. - Reinstrument(u32), - /// Weight for loading a contract per byte. - Load(u32), -} - -impl Token for CodeToken { - fn weight(&self) -> Weight { - use self::CodeToken::*; - // In case of `Load` we already covered the general costs of - // calling the storage but still need to account for the actual size of the - // contract code. This is why we subtract `T::*::(0)`. 
We need to do this at this - // point because when charging the general weight for calling the contract we not know the - // size of the contract. - match *self { - Reinstrument(len) => T::WeightInfo::reinstrument(len), - Load(len) => T::WeightInfo::call_with_code_per_byte(len) - .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)), - } - } -} diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 224c116946aa4..291f39f7fa797 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -18,104 +18,82 @@ //! This module provides a means for executing contracts //! represented in wasm. -mod code_cache; mod prepare; mod runtime; -#[cfg(feature = "runtime-benchmarks")] -pub use crate::wasm::code_cache::reinstrument; - #[cfg(doc)] pub use crate::wasm::runtime::api_doc; #[cfg(test)] pub use tests::MockExt; -pub use crate::wasm::{ - prepare::TryInstantiate, - runtime::{ - AllowDeprecatedInterface, AllowUnstableInterface, CallFlags, Environment, ReturnCode, - Runtime, RuntimeCosts, - }, +pub use crate::wasm::runtime::{ + AllowDeprecatedInterface, AllowUnstableInterface, CallFlags, Environment, ReturnCode, Runtime, + RuntimeCosts, }; use crate::{ exec::{ExecResult, Executable, ExportedFunction, Ext}, - gas::GasMeter, - AccountIdOf, BalanceOf, CodeHash, CodeVec, Config, Error, OwnerInfoOf, RelaxedCodeVec, - Schedule, LOG_TARGET, + gas::{GasMeter, Token}, + wasm::prepare::LoadedModule, + weights::WeightInfo, + AccountIdOf, BadOrigin, BalanceOf, CodeHash, CodeInfoOf, CodeVec, Config, Error, Event, + HoldReason, Pallet, PristineCode, Schedule, Weight, LOG_TARGET, }; use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::dispatch::{DispatchError, DispatchResult}; +use frame_support::{ + dispatch::{DispatchError, DispatchResult}, + ensure, + traits::{fungible::MutateHold, tokens::Precision::BestEffort}, +}; use sp_core::Get; use sp_runtime::RuntimeDebug; use sp_std::prelude::*; -use wasmi::{ - Config as 
WasmiConfig, Engine, Instance, Linker, Memory, MemoryType, Module, StackLimits, Store, -}; +use wasmi::{Instance, Linker, Memory, MemoryType, StackLimits, Store}; -/// A prepared wasm module ready for execution. -/// -/// # Note -/// -/// This data structure is mostly immutable once created and stored. The exceptions that -/// can be changed by calling a contract are `instruction_weights_version` and `code`. -/// `instruction_weights_version` and `code` change when a contract with an outdated instrumentation -/// is called. Therefore one must be careful when holding any in-memory representation of this -/// type while calling into a contract as those fields can get out of date. -#[derive(Clone, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)] +const BYTES_PER_PAGE: usize = 64 * 1024; + +/// Validated Wasm module ready for execution. +/// This data structure is immutable once created and stored. +#[derive(Encode, Decode, scale_info::TypeInfo)] #[codec(mel_bound())] #[scale_info(skip_type_params(T))] -pub struct PrefabWasmModule { - /// Version of the instruction weights with which the code was instrumented. - #[codec(compact)] - instruction_weights_version: u32, - /// Initial memory size of a contract's sandbox. - #[codec(compact)] - initial: u32, - /// The maximum memory size of a contract's sandbox. - #[codec(compact)] - maximum: u32, - /// Code instrumented with the latest schedule. - code: RelaxedCodeVec, - /// A code that might contain non deterministic features and is therefore never allowed - /// to be run on chain. Specifically this code can never be instantiated into a contract - /// and can just be used through a delegate call. - determinism: Determinism, - /// The uninstrumented, pristine version of the code. - /// - /// It is not stored because the pristine code has its own storage item. The value - /// is only `Some` when this module was created from an `original_code` and `None` if - /// it was loaded from storage. 
+pub struct WasmBlob { + code: CodeVec, + // This isn't needed for contract execution and is not stored alongside it. #[codec(skip)] - original_code: Option>, - /// The code hash of the stored code which is defined as the hash over the `original_code`. - /// - /// As the map key there is no need to store the hash in the value, too. It is set manually - /// when loading the module from storage. + code_info: CodeInfo, + // This is for not calculating the hash every time we need it. #[codec(skip)] code_hash: CodeHash, - // This isn't needed for contract execution and does not get loaded from storage by default. - // It is `Some` if and only if this struct was generated from code. - #[codec(skip)] - owner_info: Option>, } -/// Information that belongs to a [`PrefabWasmModule`] but is stored separately. +/// Contract code related data, such as: +/// +/// - owner of the contract, i.e. account uploaded its code, +/// - storage deposit amount, +/// - reference count, +/// - determinism marker. /// /// It is stored in a separate storage entry to avoid loading the code when not necessary. #[derive(Clone, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)] #[codec(mel_bound())] #[scale_info(skip_type_params(T))] -pub struct OwnerInfo { - /// The account that has deployed the contract and hence is allowed to remove it. +pub struct CodeInfo { + /// The account that has uploaded the contract code and hence is allowed to remove it. owner: AccountIdOf, - /// The amount of balance that was deposited by the owner in order to deploy it. + /// The amount of balance that was deposited by the owner in order to store it on-chain. #[codec(compact)] deposit: BalanceOf, - /// The number of contracts that use this as their code. + /// The number of instantiated contracts that use this as their code. #[codec(compact)] refcount: u64, + /// Marks if the code might contain non-deterministic features and is therefore never allowed + /// to be run on-chain. 
Specifically, such a code can never be instantiated into a contract + /// and can just be used through a delegate call. + determinism: Determinism, + /// length of the code in bytes. + code_len: u32, } /// Defines the required determinism level of a wasm blob when either running or uploading code. @@ -149,83 +127,65 @@ impl ExportedFunction { } } -impl PrefabWasmModule { - /// Create the module by checking and instrumenting `original_code`. - /// - /// This does **not** store the module. For this one need to either call [`Self::store`] - /// or [`::execute`][`Executable::execute`]. +/// Cost of code loading from storage. +#[cfg_attr(test, derive(Debug, PartialEq, Eq))] +#[derive(Clone, Copy)] +struct CodeLoadToken(u32); + +impl Token for CodeLoadToken { + fn weight(&self) -> Weight { + // When loading the contract, we already covered the general costs of + // calling the storage but still need to account for the actual size of the + // contract code. This is why we subtract `T::*::(0)`. We need to do this at this + // point because when charging the general weight for calling the contract we don't know the + // size of the contract. + T::WeightInfo::call_with_code_per_byte(self.0) + .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)) + } +} + +impl WasmBlob { + /// Create the module by checking the `code`. pub fn from_code( - original_code: Vec, + code: Vec, schedule: &Schedule, owner: AccountIdOf, determinism: Determinism, - try_instantiate: TryInstantiate, ) -> Result { - let module = prepare::prepare::( - original_code.try_into().map_err(|_| (>::CodeTooLarge.into(), ""))?, + prepare::prepare::( + code.try_into().map_err(|_| (>::CodeTooLarge.into(), ""))?, schedule, owner, determinism, - try_instantiate, - )?; - Ok(module) - } - - /// Store the code without instantiating it. - /// - /// Otherwise the code is stored when [`::execute`][`Executable::execute`] - /// is called. 
- pub fn store(self) -> DispatchResult { - code_cache::store(self, false) + ) } /// Remove the code from storage and refund the deposit to its owner. /// /// Applies all necessary checks before removing the code. pub fn remove(origin: &T::AccountId, code_hash: CodeHash) -> DispatchResult { - code_cache::try_remove::(origin, code_hash) - } - - /// Returns whether there is a deposit to be paid for this module. - /// - /// Returns `0` if the module is already in storage and hence no deposit will - /// be charged when storing it. - pub fn open_deposit(&self) -> BalanceOf { - if >::contains_key(&self.code_hash) { - 0u32.into() - } else { - // Only already in-storage contracts have their `owner_info` set to `None`. - // Therefore it is correct to return `0` in this case. - self.owner_info.as_ref().map(|i| i.deposit).unwrap_or_default() - } + Self::try_remove_code(origin, code_hash) } /// Creates and returns an instance of the supplied code. /// /// This is either used for later executing a contract or for validation of a contract. /// When validating we pass `()` as `host_state`. Please note that such a dummy instance must - /// **never** be called/executed since it will panic the executor. + /// **never** be called/executed, since it will panic the executor. 
pub fn instantiate( code: &[u8], host_state: H, - memory: (u32, u32), + schedule: &Schedule, + determinism: Determinism, stack_limits: StackLimits, allow_deprecated: AllowDeprecatedInterface, - ) -> Result<(Store, Memory, Instance), wasmi::Error> + ) -> Result<(Store, Memory, Instance), &'static str> where E: Environment, { - let mut config = WasmiConfig::default(); - config - .set_stack_limits(stack_limits) - .wasm_multi_value(false) - .wasm_mutable_global(false) - .wasm_sign_extension(false) - .wasm_saturating_float_to_int(false); - let engine = Engine::new(&config); - let module = Module::new(&engine, code)?; - let mut store = Store::new(&engine, host_state); - let mut linker = Linker::new(&engine); + let contract = LoadedModule::new::(&code, determinism, Some(stack_limits))?; + let mut store = Store::new(&contract.engine, host_state); + let mut linker = Linker::new(&contract.engine); E::define( &mut store, &mut linker, @@ -235,77 +195,178 @@ impl PrefabWasmModule { AllowUnstableInterface::No }, allow_deprecated, - )?; - let memory = Memory::new(&mut store, MemoryType::new(memory.0, Some(memory.1))?).expect( - "The limits defined in our `Schedule` limit the amount of memory well below u32::MAX; qed", - ); + ) + .map_err(|_| "can't define host functions to Linker")?; + + // Query wasmi for memory limits specified in the module's import entry. + let memory_limits = contract.scan_imports::(schedule)?; + // Here we allocate this memory in the _store_. It allocates _inital_ value, but allows it + // to grow up to maximum number of memory pages, if necessary. + let qed = "We checked the limits versus our Schedule, + which specifies the max amount of memory pages + well below u16::MAX; qed"; + let memory = Memory::new( + &mut store, + MemoryType::new(memory_limits.0, Some(memory_limits.1)).expect(qed), + ) + .expect(qed); + linker .define("env", "memory", memory) - .expect("We just created the linker. 
It has no define with this name attached; qed"); + .expect("We just created the Linker. It has no definitions with this name; qed"); - let instance = linker.instantiate(&mut store, &module)?.ensure_no_start(&mut store)?; + let instance = linker + .instantiate(&mut store, &contract.module) + .map_err(|_| "can't instantiate module with provided definitions")? + .ensure_no_start(&mut store) + .map_err(|_| "start function is forbidden but found in the module")?; Ok((store, memory, instance)) } - /// See [`Self::from_code_unchecked`]. - #[cfg(feature = "runtime-benchmarks")] - pub fn store_code_unchecked( - original_code: Vec, - schedule: &Schedule, - owner: T::AccountId, - ) -> DispatchResult { - let executable = Self::from_code_unchecked(original_code, schedule, owner)?; - code_cache::store(executable, false) + /// Puts the module blob into storage, and returns the deposit collected for the storage. + pub fn store_code(&mut self) -> Result, Error> { + let code_hash = *self.code_hash(); + >::mutate(code_hash, |stored_code_info| { + match stored_code_info { + // Contract code is already stored in storage. Nothing to be done here. + Some(_) => Ok(Default::default()), + // Upload a new contract code. + // We need to store the code and its code_info, and collect the deposit. + // This `None` case happens only with freshly uploaded modules. This means that + // the `owner` is always the origin of the current transaction. 
+ None => { + let deposit = self.code_info.deposit; + T::Currency::hold( + &HoldReason::CodeUploadDepositReserve.into(), + &self.code_info.owner, + deposit, + ) + .map_err(|_| >::StorageDepositNotEnoughFunds)?; + + self.code_info.refcount = 0; + >::insert(code_hash, &self.code); + *stored_code_info = Some(self.code_info.clone()); + >::deposit_event( + vec![code_hash], + Event::CodeStored { + code_hash, + deposit_held: deposit, + uploader: self.code_info.owner.clone(), + }, + ); + Ok(deposit) + }, + } + }) + } + + /// Try to remove code together with all associated information. + fn try_remove_code(origin: &T::AccountId, code_hash: CodeHash) -> DispatchResult { + >::try_mutate_exists(&code_hash, |existing| { + if let Some(code_info) = existing { + ensure!(code_info.refcount == 0, >::CodeInUse); + ensure!(&code_info.owner == origin, BadOrigin); + let _ = T::Currency::release( + &HoldReason::CodeUploadDepositReserve.into(), + &code_info.owner, + code_info.deposit, + BestEffort, + ); + let deposit_released = code_info.deposit; + let remover = code_info.owner.clone(); + + *existing = None; + >::remove(&code_hash); + >::deposit_event( + vec![code_hash], + Event::CodeRemoved { code_hash, deposit_released, remover }, + ); + Ok(()) + } else { + Err(>::CodeNotFound.into()) + } + }) } - /// Decrement instruction_weights_version by 1. Panics if it is already 0. - #[cfg(test)] - pub fn decrement_version(&mut self) { - self.instruction_weights_version = self.instruction_weights_version.checked_sub(1).unwrap(); + /// Load code with the given code hash. + fn load_code( + code_hash: CodeHash, + gas_meter: &mut GasMeter, + ) -> Result<(CodeVec, CodeInfo), DispatchError> { + let code_info = >::get(code_hash).ok_or(Error::::CodeNotFound)?; + gas_meter.charge(CodeLoadToken(code_info.code_len))?; + let code = >::get(code_hash).ok_or(Error::::CodeNotFound)?; + Ok((code, code_info)) } - /// Create the module without checking nor instrumenting the passed code. 
+ /// Create the module without checking the passed code. /// /// # Note /// - /// This is useful for benchmarking where we don't want instrumentation to skew + /// This is useful for benchmarking where we don't want validation of the module to skew /// our results. This also does not collect any deposit from the `owner`. Also useful /// during testing when we want to deploy codes that do not pass the instantiation checks. #[cfg(any(test, feature = "runtime-benchmarks"))] - fn from_code_unchecked( - original_code: Vec, + pub fn from_code_unchecked( + code: Vec, schedule: &Schedule, owner: T::AccountId, ) -> Result { - prepare::benchmarking::prepare(original_code, schedule, owner) - .map_err::(Into::into) + prepare::benchmarking::prepare(code, schedule, owner) } } -impl OwnerInfo { +impl CodeInfo { /// Return the refcount of the module. #[cfg(test)] pub fn refcount(&self) -> u64 { self.refcount } + + #[cfg(test)] + pub fn new(owner: T::AccountId) -> Self { + CodeInfo { + owner, + deposit: Default::default(), + refcount: 0, + code_len: 0, + determinism: Determinism::Enforced, + } + } + + /// Returns the deposit of the module. 
+ pub fn deposit(&self) -> BalanceOf { + self.deposit + } } -impl Executable for PrefabWasmModule { +impl Executable for WasmBlob { fn from_storage( code_hash: CodeHash, - schedule: &Schedule, gas_meter: &mut GasMeter, ) -> Result { - code_cache::load(code_hash, schedule, gas_meter) + let (code, code_info) = Self::load_code(code_hash, gas_meter)?; + Ok(Self { code, code_info, code_hash }) } - fn add_user(code_hash: CodeHash) -> Result<(), DispatchError> { - code_cache::increment_refcount::(code_hash) + fn increment_refcount(code_hash: CodeHash) -> Result<(), DispatchError> { + >::mutate(code_hash, |existing| -> Result<(), DispatchError> { + if let Some(info) = existing { + info.refcount = info.refcount.saturating_add(1); + Ok(()) + } else { + Err(Error::::CodeNotFound.into()) + } + }) } - fn remove_user(code_hash: CodeHash) { - code_cache::decrement_refcount::(code_hash) + fn decrement_refcount(code_hash: CodeHash) { + >::mutate(code_hash, |existing| { + if let Some(info) = existing { + info.refcount = info.refcount.saturating_sub(1); + } + }); } fn execute>( @@ -314,23 +375,41 @@ impl Executable for PrefabWasmModule { function: &ExportedFunction, input_data: Vec, ) -> ExecResult { + let code = self.code.as_slice(); + // Instantiate the Wasm module to the engine. 
let runtime = Runtime::new(ext, input_data); + let schedule = ::Schedule::get(); let (mut store, memory, instance) = Self::instantiate::( - self.code.as_slice(), + code, runtime, - (self.initial, self.maximum), + &schedule, + self.code_info.determinism, StackLimits::default(), match function { - ExportedFunction::Constructor => AllowDeprecatedInterface::No, ExportedFunction::Call => AllowDeprecatedInterface::Yes, + ExportedFunction::Constructor => AllowDeprecatedInterface::No, }, ) .map_err(|msg| { - log::debug!(target: LOG_TARGET, "failed to instantiate code: {}", msg); + log::debug!(target: LOG_TARGET, "failed to instantiate code to wasmi: {}", msg); Error::::CodeRejected })?; store.data_mut().set_memory(memory); + // Set fuel limit for the wasmi execution. + // We normalize it by the base instruction weight, as its cost in wasmi engine is `1`. + let fuel_limit = store + .data_mut() + .ext() + .gas_meter_mut() + .gas_left() + .ref_time() + .checked_div(T::Schedule::get().instruction_weights.base as u64) + .ok_or(Error::::InvalidSchedule)?; + store + .add_fuel(fuel_limit) + .expect("We've set up engine to fuel consuming mode; qed"); + let exported_func = instance .get_export(&store, function.identifier()) .and_then(|export| export.into_func()) @@ -339,12 +418,15 @@ impl Executable for PrefabWasmModule { Error::::CodeRejected })?; - // We store before executing so that the code hash is available in the constructor. if let &ExportedFunction::Constructor = function { - code_cache::store(self, true)?; + WasmBlob::::increment_refcount(self.code_hash)?; } let result = exported_func.call(&mut store, &[], &mut []); + let engine_consumed_total = store.fuel_consumed().expect("Fuel metering is enabled; qed"); + // Sync this frame's gas meter with the engine's one. 
+ let gas_meter = store.data_mut().ext().gas_meter_mut(); + gas_meter.charge_fuel(engine_consumed_total)?; store.into_data().to_execution_result(result) } @@ -353,12 +435,16 @@ impl Executable for PrefabWasmModule { &self.code_hash } + fn code_info(&self) -> &CodeInfo { + &self.code_info + } + fn code_len(&self) -> u32 { self.code.len() as u32 } fn is_deterministic(&self) -> bool { - matches!(self.determinism, Determinism::Enforced) + matches!(self.code_info.determinism, Determinism::Enforced) } } @@ -366,7 +452,7 @@ impl Executable for PrefabWasmModule { mod tests { use super::*; use crate::{ - exec::{AccountIdOf, BlockNumberOf, ErrorOrigin, ExecError, Executable, Ext, Key, SeedOf}, + exec::{AccountIdOf, ErrorOrigin, ExecError, Executable, Ext, Key, SeedOf}, gas::GasMeter, storage::WriteOutcome, tests::{RuntimeCall, Test, ALICE, BOB}, @@ -376,6 +462,7 @@ mod tests { use frame_support::{ assert_err, assert_ok, dispatch::DispatchResultWithPostInfo, weights::Weight, }; + use frame_system::pallet_prelude::BlockNumberFor; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pretty_assertions::assert_eq; use sp_core::H256; @@ -383,7 +470,10 @@ mod tests { use std::{ borrow::BorrowMut, cell::RefCell, - collections::hash_map::{Entry, HashMap}, + collections::{ + hash_map::{Entry, HashMap}, + HashSet, + }, }; #[derive(Debug, PartialEq, Eq)] @@ -437,6 +527,7 @@ mod tests { sr25519_verify: RefCell, [u8; 32])>>, code_hashes: Vec>, caller: Origin, + delegate_dependencies: RefCell>>, } /// The call is mocked and just returns this hardcoded value. 
@@ -462,6 +553,7 @@ mod tests { ecdsa_recover: Default::default(), caller: Default::default(), sr25519_verify: Default::default(), + delegate_dependencies: Default::default(), } } } @@ -581,7 +673,7 @@ mod tests { fn minimum_balance(&self) -> u64 { 666 } - fn random(&self, subject: &[u8]) -> (SeedOf, BlockNumberOf) { + fn random(&self, subject: &[u8]) -> (SeedOf, BlockNumberFor) { (H256::from_slice(subject), 42) } fn deposit_event(&mut self, topics: Vec, data: Vec) { @@ -603,9 +695,13 @@ mod tests { fn schedule(&self) -> &Schedule { &self.schedule } - fn gas_meter(&mut self) -> &mut GasMeter { + fn gas_meter(&self) -> &GasMeter { + &self.gas_meter + } + fn gas_meter_mut(&mut self) -> &mut GasMeter { &mut self.gas_meter } + fn charge_storage(&mut self, _diff: &crate::storage::meter::Diff) {} fn append_debug_buffer(&mut self, msg: &str) -> bool { self.debug_buffer.extend(msg.as_bytes()); true @@ -644,6 +740,22 @@ mod tests { fn nonce(&mut self) -> u64 { 995 } + + fn add_delegate_dependency( + &mut self, + code: CodeHash, + ) -> Result<(), DispatchError> { + self.delegate_dependencies.borrow_mut().insert(code); + Ok(()) + } + + fn remove_delegate_dependency( + &mut self, + code: &CodeHash, + ) -> Result<(), DispatchError> { + self.delegate_dependencies.borrow_mut().remove(code); + Ok(()) + } } /// Execute the supplied code. @@ -660,16 +772,18 @@ mod tests { type RuntimeConfig = ::T; RuntimeConfig::set_unstable_interface(unstable_interface); let wasm = wat::parse_str(wat).unwrap(); - let schedule = crate::Schedule::default(); let executable = if skip_checks { - PrefabWasmModule::::from_code_unchecked(wasm, &schedule, ALICE)? + WasmBlob::::from_code_unchecked( + wasm, + ext.borrow_mut().schedule(), + ALICE, + )? } else { - PrefabWasmModule::::from_code( + WasmBlob::::from_code( wasm, - &schedule, + ext.borrow_mut().schedule(), ALICE, Determinism::Enforced, - TryInstantiate::Instantiate, ) .map_err(|err| err.0)? 
}; @@ -2059,7 +2173,7 @@ mod tests { ExecReturnValue { flags: ReturnFlags::empty(), data: ( - array_bytes::hex2array_unchecked::<32>( + array_bytes::hex2array_unchecked::<_, 32>( "000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F" ), 42u64, @@ -3140,6 +3254,8 @@ mod tests { const CODE: &str = r#" (module (import "seal0" "instantiation_nonce" (func $nonce (result i64))) + (import "env" "memory" (memory 1 1)) + (func $assert (param i32) (block $ok (br_if $ok @@ -3161,25 +3277,6 @@ mod tests { execute(CODE, vec![], &mut mock_ext).unwrap(); } - /// Code with deprecated functions cannot be uploaded or instantiated. However, we - /// need to make sure that it still can be re-instrumented. - #[test] - fn can_reinstrument_deprecated() { - const CODE_RANDOM: &str = r#" -(module - (import "seal0" "random" (func $seal_random (param i32 i32 i32 i32))) - (func (export "call")) - (func (export "deploy")) -) - "#; - let wasm = wat::parse_str(CODE_RANDOM).unwrap(); - let schedule = crate::Schedule::::default(); - #[cfg(not(feature = "runtime-benchmarks"))] - assert_err!(execute(CODE_RANDOM, vec![], MockExt::default()), >::CodeRejected); - self::prepare::reinstrument::(&wasm, &schedule, Determinism::Enforced) - .unwrap(); - } - /// This test check that an unstable interface cannot be deployed. In case of runtime /// benchmarks we always allow unstable interfaces. This is why this test does not /// work when this feature is enabled. 
@@ -3189,6 +3286,8 @@ mod tests { const CANNOT_DEPLOY_UNSTABLE: &str = r#" (module (import "seal0" "reentrance_count" (func $reentrance_count (result i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call")) (func (export "deploy")) ) @@ -3209,6 +3308,8 @@ mod tests { const CODE_RANDOM_0: &str = r#" (module (import "seal0" "seal_random" (func $seal_random (param i32 i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call")) (func (export "deploy")) ) @@ -3216,6 +3317,8 @@ mod tests { const CODE_RANDOM_1: &str = r#" (module (import "seal1" "seal_random" (func $seal_random (param i32 i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call")) (func (export "deploy")) ) @@ -3223,6 +3326,8 @@ mod tests { const CODE_RANDOM_2: &str = r#" (module (import "seal0" "random" (func $seal_random (param i32 i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call")) (func (export "deploy")) ) @@ -3230,6 +3335,8 @@ mod tests { const CODE_RANDOM_3: &str = r#" (module (import "seal1" "random" (func $seal_random (param i32 i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call")) (func (export "deploy")) ) @@ -3275,4 +3382,39 @@ mod tests { >::CodeRejected, ); } + + #[test] + fn add_remove_delegate_dependency() { + const CODE_ADD_REMOVE_DELEGATE_DEPENDENCY: &str = r#" +(module + (import "seal0" "add_delegate_dependency" (func $add_delegate_dependency (param i32))) + (import "seal0" "remove_delegate_dependency" (func $remove_delegate_dependency (param i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call") + (call $add_delegate_dependency (i32.const 0)) + (call $add_delegate_dependency (i32.const 32)) + (call $remove_delegate_dependency (i32.const 32)) + ) + (func (export "deploy")) + + ;; hash1 (32 bytes) + (data (i32.const 0) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + + ;; hash2 (32 bytes) + (data (i32.const 
32) + "\02\02\02\02\02\02\02\02\02\02\02\02\02\02\02\02" + "\02\02\02\02\02\02\02\02\02\02\02\02\02\02\02\02" + ) +) +"#; + let mut mock_ext = MockExt::default(); + assert_ok!(execute(&CODE_ADD_REMOVE_DELEGATE_DEPENDENCY, vec![], &mut mock_ext)); + let delegate_dependencies: Vec<_> = + mock_ext.delegate_dependencies.into_inner().into_iter().collect(); + assert_eq!(delegate_dependencies.len(), 1); + assert_eq!(delegate_dependencies[0].as_bytes(), [1; 32]); + } } diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index 14fec834733eb..b129c17e13eca 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -23,161 +23,68 @@ use crate::{ chain_extension::ChainExtension, storage::meter::Diff, wasm::{ - runtime::AllowDeprecatedInterface, Determinism, Environment, OwnerInfo, PrefabWasmModule, + runtime::AllowDeprecatedInterface, CodeInfo, Determinism, Environment, WasmBlob, + BYTES_PER_PAGE, }, AccountIdOf, CodeVec, Config, Error, Schedule, LOG_TARGET, }; -use codec::{Encode, MaxEncodedLen}; +use codec::MaxEncodedLen; use sp_runtime::{traits::Hash, DispatchError}; -use sp_std::prelude::*; -use wasm_instrument::{ - gas_metering, - parity_wasm::elements::{self, External, Internal, MemoryType, Type, ValueType}, +#[cfg(any(test, feature = "runtime-benchmarks"))] +use sp_std::prelude::Vec; +use wasmi::{ + core::ValueType as WasmiValueType, Config as WasmiConfig, Engine, ExternType, + FuelConsumptionMode, Module, StackLimits, }; -use wasmi::StackLimits; -use wasmparser::{Validator, WasmFeatures}; /// Imported memory must be located inside this module. The reason for hardcoding is that current /// compiler toolchains might not support specifying other modules than "env" for memory imports. pub const IMPORT_MODULE_MEMORY: &str = "env"; -/// Determines whether a module should be instantiated during preparation. -pub enum TryInstantiate { - /// Do the instantiation to make sure that the module is valid. 
- /// - /// This should be used if a module is only uploaded but not executed. We need - /// to make sure that it can be actually instantiated. - Instantiate, - /// Skip the instantiation during preparation. - /// - /// This makes sense when the preparation takes place as part of an instantiation. Then - /// this instantiation would fail the whole transaction and an extra check is not - /// necessary. - Skip, -} - -/// The reason why a contract is instrumented. -enum InstrumentReason { - /// A new code is uploaded. - New, - /// Existing code is re-instrumented. - Reinstrument, +/// The inner deserialized module is valid and contains only allowed WebAssembly features. +/// This is checked by loading it into wasmi interpreter `engine`. +pub struct LoadedModule { + pub module: Module, + pub engine: Engine, } -struct ContractModule<'a, T: Config> { - /// A deserialized module. The module is valid (this is Guaranteed by `new` method). - module: elements::Module, - schedule: &'a Schedule, -} - -impl<'a, T: Config> ContractModule<'a, T> { - /// Creates a new instance of `ContractModule`. +impl LoadedModule { + /// Creates a new instance of `LoadedModule`. /// - /// Returns `Err` if the `original_code` couldn't be decoded or - /// if it contains an invalid module. - fn new(original_code: &[u8], schedule: &'a Schedule) -> Result { - let module = - elements::deserialize_buffer(original_code).map_err(|_| "Can't decode wasm code")?; - - // Return a `ContractModule` instance with - // __valid__ module. - Ok(ContractModule { module, schedule }) - } - - /// Ensures that module doesn't declare internal memories. - /// - /// In this runtime we only allow wasm module to import memory from the environment. - /// Memory section contains declarations of internal linear memories, so if we find one - /// we reject such a module. 
- fn ensure_no_internal_memory(&self) -> Result<(), &'static str> { - if self.module.memory_section().map_or(false, |ms| ms.entries().len() > 0) { - return Err("module declares internal memory") - } - Ok(()) - } - - /// Ensures that tables declared in the module are not too big. - fn ensure_table_size_limit(&self, limit: u32) -> Result<(), &'static str> { - if let Some(table_section) = self.module.table_section() { - // In Wasm MVP spec, there may be at most one table declared. Double check this - // explicitly just in case the Wasm version changes. - if table_section.entries().len() > 1 { - return Err("multiple tables declared") - } - if let Some(table_type) = table_section.entries().first() { - // Check the table's initial size as there is no instruction or environment function - // capable of growing the table. - if table_type.limits().initial() > limit { - return Err("table exceeds maximum size allowed") - } - } - } - Ok(()) - } - - /// Ensure that any `br_table` instruction adheres to its immediate value limit. 
- fn ensure_br_table_size_limit(&self, limit: u32) -> Result<(), &'static str> { - let code_section = if let Some(type_section) = self.module.code_section() { - type_section - } else { - return Ok(()) - }; - for instr in code_section.bodies().iter().flat_map(|body| body.code().elements()) { - use self::elements::Instruction::BrTable; - if let BrTable(table) = instr { - if table.table.len() > limit as usize { - return Err("BrTable's immediate value is too big.") - } - } - } - Ok(()) - } - - fn ensure_global_variable_limit(&self, limit: u32) -> Result<(), &'static str> { - if let Some(global_section) = self.module.global_section() { - if global_section.entries().len() > limit as usize { - return Err("module declares too many globals") - } - } - Ok(()) - } - - fn ensure_local_variable_limit(&self, limit: u32) -> Result<(), &'static str> { - if let Some(code_section) = self.module.code_section() { - for func_body in code_section.bodies() { - let locals_count: u32 = - func_body.locals().iter().map(|val_type| val_type.count()).sum(); - if locals_count > limit { - return Err("single function declares too many locals") - } - } + /// The inner Wasm module is checked not to have restricted WebAssembly proposals. + /// Returns `Err` if the `code` cannot be deserialized or if it contains an invalid module. + pub fn new( + code: &[u8], + determinism: Determinism, + stack_limits: Option, + ) -> Result { + // NOTE: wasmi does not support unstable WebAssembly features. The module is implicitly + // checked for not having those ones when creating `wasmi::Module` below. 
+ let mut config = WasmiConfig::default(); + config + .wasm_multi_value(false) + .wasm_mutable_global(false) + .wasm_sign_extension(true) + .wasm_bulk_memory(false) + .wasm_reference_types(false) + .wasm_tail_call(false) + .wasm_extended_const(false) + .wasm_saturating_float_to_int(false) + .floats(matches!(determinism, Determinism::Relaxed)) + .consume_fuel(true) + .fuel_consumption_mode(FuelConsumptionMode::Eager); + + if let Some(stack_limits) = stack_limits { + config.set_stack_limits(stack_limits); } - Ok(()) - } - - /// Ensure that no function exists that has more parameters than allowed. - fn ensure_parameter_limit(&self, limit: u32) -> Result<(), &'static str> { - let type_section = if let Some(type_section) = self.module.type_section() { - type_section - } else { - return Ok(()) - }; - for Type::Function(func) in type_section.types() { - if func.params().len() > limit as usize { - return Err("Use of a function type with too many parameters.") - } - } - - Ok(()) - } + let engine = Engine::new(&config); + let module = + Module::new(&engine, code.clone()).map_err(|_| "Can't load the module into wasmi!")?; - fn inject_gas_metering(self, determinism: Determinism) -> Result { - let gas_rules = self.schedule.rules(determinism); - let backend = gas_metering::host_function::Injector::new("seal0", "gas"); - let contract_module = gas_metering::inject(self.module, backend, &gas_rules) - .map_err(|_| "gas instrumentation failed")?; - Ok(ContractModule { module: contract_module, schedule: self.schedule }) + // Return a `LoadedModule` instance with + // __valid__ module. + Ok(LoadedModule { module, engine }) } /// Check that the module has required exported functions. 
For now @@ -190,60 +97,32 @@ impl<'a, T: Config> ContractModule<'a, T> { fn scan_exports(&self) -> Result<(), &'static str> { let mut deploy_found = false; let mut call_found = false; - let module = &self.module; - - let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]); - let export_entries = module.export_section().map(|is| is.entries()).unwrap_or(&[]); - let func_entries = module.function_section().map(|fs| fs.entries()).unwrap_or(&[]); - - // Function index space consists of imported function following by - // declared functions. Calculate the total number of imported functions so - // we can use it to convert indexes from function space to declared function space. - let fn_space_offset = module - .import_section() - .map(|is| is.entries()) - .unwrap_or(&[]) - .iter() - .filter(|entry| matches!(*entry.external(), External::Function(_))) - .count(); - - for export in export_entries { - match export.field() { - "call" => call_found = true, - "deploy" => deploy_found = true, - _ => return Err("unknown export: expecting only deploy and call functions"), - } - - // Then check the export kind. "call" and "deploy" are - // functions. - let fn_idx = match export.internal() { - Internal::Function(ref fn_idx) => *fn_idx, - _ => return Err("expected a function"), - }; - - // convert index from function index space to declared index space. - let fn_idx = match fn_idx.checked_sub(fn_space_offset as u32) { - Some(fn_idx) => fn_idx, - None => { - // Underflow here means fn_idx points to imported function which we don't allow! - return Err("entry point points to an imported function") + let exports = module.exports(); + + for export in exports { + match export.ty() { + ExternType::Func(ft) => { + match export.name() { + "call" => call_found = true, + "deploy" => deploy_found = true, + _ => + return Err( + "unknown function export: expecting only deploy and call functions", + ), + } + // Check the signature. 
+ // Both "call" and "deploy" have the () -> () function type. + // We still support () -> (i32) for backwards compatibility. + if !(ft.params().is_empty() && + (ft.results().is_empty() || ft.results() == [WasmiValueType::I32])) + { + return Err("entry point has wrong signature") + } }, - }; - - // Then check the signature. - // Both "call" and "deploy" has a () -> () function type. - // We still support () -> (i32) for backwards compatibility. - let func_ty_idx = func_entries - .get(fn_idx as usize) - .ok_or("export refers to non-existent function")? - .type_ref(); - let Type::Function(ref func_ty) = - types.get(func_ty_idx as usize).ok_or("function has a non-existent type")?; - if !(func_ty.params().is_empty() && - (func_ty.results().is_empty() || func_ty.results() == [ValueType::I32])) - { - return Err("entry point has wrong signature") + ExternType::Memory(_) => return Err("memory export is forbidden"), + ExternType::Global(_) => return Err("global export is forbidden"), + ExternType::Table(_) => return Err("table export is forbidden"), } } @@ -259,307 +138,195 @@ impl<'a, T: Config> ContractModule<'a, T> { /// Scan an import section if any. /// - /// This makes sure that the import section looks as we expect it from a contract - /// and enforces and returns the memory type declared by the contract if any. + /// This makes sure that: + /// - The import section looks as we expect it from a contract. + /// - The limits of the memory type declared by the contract comply with the Schedule. + /// + /// Returns the checked memory limits back to caller. + /// + /// This method fails if: + /// + /// - Memory import not found in the module. + /// - Tables or globals found among imports. + /// - `call_chain_extension` host function is imported, while chain extensions are disabled. 
/// - /// `import_fn_banlist`: list of function names that are disallowed to be imported - fn scan_imports( + /// NOTE that only single memory instance is allowed for contract modules, which is enforced by + /// this check combined with multi_memory proposal gets disabled in the engine. + pub fn scan_imports( &self, - import_fn_banlist: &[&[u8]], - ) -> Result, &'static str> { + schedule: &Schedule, + ) -> Result<(u32, u32), &'static str> { let module = &self.module; - let import_entries = module.import_section().map(|is| is.entries()).unwrap_or(&[]); - let mut imported_mem_type = None; - - for import in import_entries { - match *import.external() { - External::Table(_) => return Err("Cannot import tables"), - External::Global(_) => return Err("Cannot import globals"), - External::Function(_) => { - if !T::ChainExtension::enabled() && - import.field().as_bytes() == b"seal_call_chain_extension" + let imports = module.imports(); + let mut memory_limits = None; + + for import in imports { + match *import.ty() { + ExternType::Table(_) => return Err("Cannot import tables"), + ExternType::Global(_) => return Err("Cannot import globals"), + ExternType::Func(_) => { + let _ = import.ty().func().ok_or("expected a function")?; + + if !::ChainExtension::enabled() && + (import.name().as_bytes() == b"seal_call_chain_extension" || + import.name().as_bytes() == b"call_chain_extension") { - return Err("module uses chain extensions but chain extensions are disabled") - } - - if import_fn_banlist.iter().any(|f| import.field().as_bytes() == *f) { - return Err("module imports a banned function") + return Err("Module uses chain extensions but chain extensions are disabled") } }, - External::Memory(ref memory_type) => { - if import.module() != IMPORT_MODULE_MEMORY { + ExternType::Memory(mt) => { + if import.module().as_bytes() != IMPORT_MODULE_MEMORY.as_bytes() { return Err("Invalid module for imported memory") } - if import.field() != "memory" { + if import.name().as_bytes() != 
b"memory" { return Err("Memory import must have the field name 'memory'") } - if imported_mem_type.is_some() { + if memory_limits.is_some() { return Err("Multiple memory imports defined") } - imported_mem_type = Some(memory_type); + // Parse memory limits defaulting it to (0,0). + // Any access to it will then lead to out of bounds trap. + let (initial, maximum) = ( + mt.initial_pages().to_bytes().unwrap_or(0).saturating_div(BYTES_PER_PAGE) + as u32, + mt.maximum_pages().map_or(schedule.limits.memory_pages, |p| { + p.to_bytes().unwrap_or(0).saturating_div(BYTES_PER_PAGE) as u32 + }), + ); + if initial > maximum { + return Err( + "Requested initial number of memory pages should not exceed the requested maximum", + ) + } + if maximum > schedule.limits.memory_pages { + return Err("Maximum number of memory pages should not exceed the maximum configured in the Schedule") + } + + memory_limits = Some((initial, maximum)); continue }, } } - Ok(imported_mem_type) - } - fn into_wasm_code(self) -> Result, &'static str> { - elements::serialize(self.module).map_err(|_| "error serializing instrumented module") - } -} - -fn get_memory_limits( - module: Option<&MemoryType>, - schedule: &Schedule, -) -> Result<(u32, u32), &'static str> { - if let Some(memory_type) = module { - // Inspect the module to extract the initial and maximum page count. - let limits = memory_type.limits(); - match (limits.initial(), limits.maximum()) { - (initial, Some(maximum)) if initial > maximum => - Err("Requested initial number of pages should not exceed the requested maximum"), - (_, Some(maximum)) if maximum > schedule.limits.memory_pages => - Err("Maximum number of pages should not exceed the configured maximum."), - (initial, Some(maximum)) => Ok((initial, maximum)), - (_, None) => { - // Maximum number of pages should be always declared. - // This isn't a hard requirement and can be treated as a maximum set - // to configured maximum. 
- Err("Maximum number of pages should be always declared.") - }, - } - } else { - // If none memory imported then just create an empty placeholder. - // Any access to it will lead to out of bounds trap. - Ok((0, 0)) + memory_limits.ok_or("No memory import found in the module") } } -/// Check and instrument the given `original_code`. +/// Check that given `code` satisfies constraints required for the contract Wasm module. +/// This includes two groups of checks: /// -/// On success it returns the instrumented versions together with its `(initial, maximum)` -/// error requirement. The memory requirement was also validated against the `schedule`. -fn instrument( - original_code: &[u8], +/// 1. General engine-side validation makes sure the module is consistent and does not contain +/// forbidden WebAssembly features. +/// 2. Additional checks which are specific to smart contracts eligible for this pallet. +fn validate( + code: &[u8], schedule: &Schedule, determinism: Determinism, - try_instantiate: TryInstantiate, - reason: InstrumentReason, -) -> Result<(Vec, (u32, u32)), (DispatchError, &'static str)> +) -> Result<(), (DispatchError, &'static str)> where E: Environment<()>, T: Config, { - // Do not enable any features here. Any additional feature needs to be carefully - // checked for potential security issues. For example, enabling multi value could lead - // to a DoS vector: It breaks our assumption that branch instructions are of constant time. - // Depending on the implementation they can linearly depend on the amount of values returned - // from a block. - Validator::new_with_features(WasmFeatures { - relaxed_simd: false, - threads: false, - tail_call: false, - multi_memory: false, - exceptions: false, - memory64: false, - extended_const: false, - component_model: false, - // This is not our only defense: All instructions explicitly need to have weights assigned - // or the deployment will fail. We have none assigned for float instructions. 
- floats: matches!(determinism, Determinism::Relaxed), - mutable_global: false, - saturating_float_to_int: false, - sign_extension: false, - bulk_memory: false, - multi_value: false, - reference_types: false, - simd: false, - memory_control: false, - }) - .validate_all(original_code) - .map_err(|err| { - log::debug!(target: LOG_TARGET, "{}", err); - (Error::::CodeRejected.into(), "validation of new code failed") - })?; - - let (code, (initial, maximum)) = (|| { - let contract_module = ContractModule::new(original_code, schedule)?; + (|| { + // We check that the module is generally valid, + // and does not have restricted WebAssembly features, here. + let contract_module = LoadedModule::new::(code, determinism, None)?; + // The we check that module satisfies constraints the pallet puts on contracts. contract_module.scan_exports()?; - contract_module.ensure_no_internal_memory()?; - contract_module.ensure_table_size_limit(schedule.limits.table_size)?; - contract_module.ensure_global_variable_limit(schedule.limits.globals)?; - contract_module.ensure_local_variable_limit(schedule.limits.locals)?; - contract_module.ensure_parameter_limit(schedule.limits.parameters)?; - contract_module.ensure_br_table_size_limit(schedule.limits.br_table_size)?; - - // We disallow importing `gas` function here since it is treated as implementation detail. 
- let disallowed_imports = [b"gas".as_ref()]; - let memory_limits = - get_memory_limits(contract_module.scan_imports(&disallowed_imports)?, schedule)?; - - let code = contract_module.inject_gas_metering(determinism)?.into_wasm_code()?; - - Ok((code, memory_limits)) + contract_module.scan_imports::(schedule)?; + Ok(()) })() .map_err(|msg: &str| { - log::debug!(target: LOG_TARGET, "new code rejected: {}", msg); + log::debug!(target: LOG_TARGET, "New code rejected on validation: {}", msg); (Error::::CodeRejected.into(), msg) })?; // This will make sure that the module can be actually run within wasmi: // - // - Doesn't use any unknown imports. - // - Doesn't explode the wasmi bytecode generation. - if matches!(try_instantiate, TryInstantiate::Instantiate) { - // We don't actually ever run any code so we can get away with a minimal stack which - // reduces the amount of memory that needs to be zeroed. - let stack_limits = StackLimits::new(1, 1, 0).expect("initial <= max; qed"); - PrefabWasmModule::::instantiate::( - &code, - (), - (initial, maximum), - stack_limits, - match reason { - InstrumentReason::New => AllowDeprecatedInterface::No, - InstrumentReason::Reinstrument => AllowDeprecatedInterface::Yes, - }, - ) - .map_err(|err| { - log::debug!(target: LOG_TARGET, "{}", err); - (Error::::CodeRejected.into(), "new code rejected after instrumentation") - })?; - } + // - It doesn't use any unknown imports. + // - It doesn't explode the wasmi bytecode generation. + // + // We don't actually ever execute this instance so we can get away with a minimal stack which + // reduces the amount of memory that needs to be zeroed. 
+ let stack_limits = StackLimits::new(1, 1, 0).expect("initial <= max; qed"); + WasmBlob::::instantiate::( + &code, + (), + schedule, + determinism, + stack_limits, + AllowDeprecatedInterface::No, + ) + .map_err(|err| { + log::debug!(target: LOG_TARGET, "{}", err); + (Error::::CodeRejected.into(), "New code rejected on wasmi instantiation!") + })?; - Ok((code, (initial, maximum))) + Ok(()) } -/// Loads the given module given in `original_code`, performs some checks on it and -/// does some preprocessing. +/// Validates the given binary `code` is a valid Wasm module satisfying following constraints: /// -/// The checks are: +/// - The module doesn't export any memory. +/// - The module does imports memory, which limits lay within the limits permitted by the +/// `schedule`. +/// - All imported functions from the external environment match defined by `env` module. /// -/// - the provided code is a valid wasm module -/// - the module doesn't define an internal memory instance -/// - imported memory (if any) doesn't reserve more memory than permitted by the `schedule` -/// - all imported functions from the external environment matches defined by `env` module -/// -/// The preprocessing includes injecting code for gas metering and metering the height of stack. +/// Also constructs contract `code_info` by calculating the storage deposit. 
pub fn prepare( - original_code: CodeVec, + code: CodeVec, schedule: &Schedule, owner: AccountIdOf, determinism: Determinism, - try_instantiate: TryInstantiate, -) -> Result, (DispatchError, &'static str)> +) -> Result, (DispatchError, &'static str)> where E: Environment<()>, T: Config, { - let (code, (initial, maximum)) = instrument::( - original_code.as_ref(), - schedule, - determinism, - try_instantiate, - InstrumentReason::New, - )?; - - let original_code_len = original_code.len(); - - let mut module = PrefabWasmModule { - instruction_weights_version: schedule.instruction_weights.version, - initial, - maximum, - code: code.try_into().map_err(|_| (>::CodeTooLarge.into(), ""))?, - code_hash: T::Hashing::hash(&original_code), - original_code: Some(original_code), - owner_info: None, - determinism, - }; + validate::(code.as_ref(), schedule, determinism)?; - // We need to add the sizes of the `#[codec(skip)]` fields which are stored in different - // storage items. This is also why we have `3` items added and not only one. - let bytes_added = module - .encoded_size() - .saturating_add(original_code_len) - .saturating_add(>::max_encoded_len()) as u32; - let deposit = Diff { bytes_added, items_added: 3, ..Default::default() } + // Calculate deposit for storing contract code and `code_info` in two different storage items. + let code_len = code.len() as u32; + let bytes_added = code_len.saturating_add(>::max_encoded_len() as u32); + let deposit = Diff { bytes_added, items_added: 2, ..Default::default() } .update_contract::(None) .charge_or_zero(); + let code_info = CodeInfo { owner, deposit, determinism, refcount: 0, code_len }; + let code_hash = T::Hashing::hash(&code); - module.owner_info = Some(OwnerInfo { owner, deposit, refcount: 0 }); - - Ok(module) -} - -/// Same as [`prepare`] but without constructing a new module. -/// -/// Used to update the code of an existing module to the newest [`Schedule`] version. 
-/// Stictly speaking is not necessary to check the existing code before reinstrumenting because -/// it can't change in the meantime. However, since we recently switched the validation library -/// we want to re-validate to weed out any bugs that were lurking in the old version. -pub fn reinstrument( - original_code: &[u8], - schedule: &Schedule, - determinism: Determinism, -) -> Result, DispatchError> -where - E: Environment<()>, - T: Config, -{ - instrument::( - original_code, - schedule, - determinism, - // This function was triggered by an interaction with an existing contract code - // that will try to instantiate anyways. Failing here would not help - // as the contract is already on chain. - TryInstantiate::Skip, - InstrumentReason::Reinstrument, - ) - .map_err(|(err, msg)| { - log::error!(target: LOG_TARGET, "CodeRejected during reinstrument: {}", msg); - err - }) - .map(|(code, _)| code) + Ok(WasmBlob { code, code_info, code_hash }) } /// Alternate (possibly unsafe) preparation functions used only for benchmarking and testing. /// /// For benchmarking we need to construct special contracts that might not pass our -/// sanity checks or need to skip instrumentation for correct results. We hide functions -/// allowing this behind a feature that is only set during benchmarking or testing to -/// prevent usage in production code. +/// sanity checks. We hide functions allowing this behind a feature that is only set during +/// benchmarking or testing to prevent usage in production code. #[cfg(any(test, feature = "runtime-benchmarks"))] pub mod benchmarking { use super::*; - /// Prepare function that neither checks nor instruments the passed in code. + /// Prepare function that does not perform export section checks on the passed in code. 
pub fn prepare( - original_code: Vec, + code: Vec, schedule: &Schedule, owner: AccountIdOf, - ) -> Result, &'static str> { - let contract_module = ContractModule::new(&original_code, schedule)?; - let memory_limits = get_memory_limits(contract_module.scan_imports(&[])?, schedule)?; - Ok(PrefabWasmModule { - instruction_weights_version: schedule.instruction_weights.version, - initial: memory_limits.0, - maximum: memory_limits.1, - code_hash: T::Hashing::hash(&original_code), - original_code: Some(original_code.try_into().map_err(|_| "Original code too large")?), - code: contract_module - .into_wasm_code()? - .try_into() - .map_err(|_| "Instrumented code too large")?, - owner_info: Some(OwnerInfo { - owner, - // this is a helper function for benchmarking which skips deposit collection - deposit: Default::default(), - refcount: 0, - }), - determinism: Determinism::Enforced, - }) + ) -> Result, DispatchError> { + let determinism = Determinism::Enforced; + let contract_module = LoadedModule::new::(&code, determinism, None)?; + let _ = contract_module.scan_imports::(schedule)?; + let code: CodeVec = code.try_into().map_err(|_| >::CodeTooLarge)?; + let code_info = CodeInfo { + owner, + // this is a helper function for benchmarking which skips deposit collection + deposit: Default::default(), + refcount: 0, + code_len: code.len() as u32, + determinism, + }; + let code_hash = T::Hashing::hash(&code); + + Ok(WasmBlob { code, code_info, code_hash }) } } @@ -574,9 +341,9 @@ mod tests { use pallet_contracts_proc_macro::define_env; use std::fmt; - impl fmt::Debug for PrefabWasmModule { + impl fmt::Debug for WasmBlob { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "PreparedContract {{ .. }}") + write!(f, "ContractCode {{ .. 
}}") } } @@ -633,7 +400,6 @@ mod tests { &schedule, ALICE, Determinism::Enforced, - TryInstantiate::Instantiate, ); assert_matches::assert_matches!(r.map_err(|(_, msg)| msg), $($expected)*); } @@ -654,108 +420,9 @@ mod tests { ) (func (export "deploy")) )"#, - Err("validation of new code failed") + Err("Can't load the module into wasmi!") ); - mod functions { - use super::*; - - prepare_test!( - param_number_valid, - r#" - (module - (func (export "call")) - (func (export "deploy")) - (func (param i32 i32 i32)) - ) - "#, - Ok(_) - ); - - prepare_test!( - param_number_invalid, - r#" - (module - (func (export "call")) - (func (export "deploy")) - (func (param i32 i32 i32 i32)) - (func (param i32)) - ) - "#, - Err("Use of a function type with too many parameters.") - ); - } - - mod globals { - use super::*; - - prepare_test!( - global_number_valid, - r#" - (module - (global i64 (i64.const 0)) - (global i64 (i64.const 0)) - (global i64 (i64.const 0)) - (func (export "call")) - (func (export "deploy")) - ) - "#, - Ok(_) - ); - - prepare_test!( - global_number_too_high, - r#" - (module - (global i64 (i64.const 0)) - (global i64 (i64.const 0)) - (global i64 (i64.const 0)) - (global i64 (i64.const 0)) - (func (export "call")) - (func (export "deploy")) - ) - "#, - Err("module declares too many globals") - ); - } - - mod locals { - use super::*; - - prepare_test!( - local_number_valid, - r#" - (module - (func - (local i32) - (local i32) - (local i32) - ) - (func (export "call")) - (func (export "deploy")) - ) - "#, - Ok(_) - ); - - prepare_test!( - local_number_too_high, - r#" - (module - (func - (local i32) - (local i32) - (local i32) - (local i32) - ) - (func (export "call")) - (func (export "deploy")) - ) - "#, - Err("single function declares too many locals") - ); - } - mod memories { use super::*; @@ -782,7 +449,7 @@ mod tests { (func (export "deploy")) ) "#, - Err("module declares internal memory") + Err("No memory import found in the module") ); prepare_test!( @@ 
-794,7 +461,7 @@ mod tests { (func (export "call")) (func (export "deploy")) )"#, - Ok(_) + Err("No memory import found in the module") ); prepare_test!( @@ -807,20 +474,7 @@ mod tests { (func (export "deploy")) ) "#, - Err("validation of new code failed") - ); - - prepare_test!( - no_maximum, - r#" - (module - (import "env" "memory" (memory 1)) - - (func (export "call")) - (func (export "deploy")) - ) - "#, - Err("Maximum number of pages should be always declared.") + Err("Can't load the module into wasmi!") ); prepare_test!( @@ -846,7 +500,7 @@ mod tests { (func (export "deploy")) ) "#, - Err("Maximum number of pages should not exceed the configured maximum.") + Err("Maximum number of memory pages should not exceed the maximum configured in the Schedule") ); prepare_test!( @@ -873,7 +527,7 @@ mod tests { (func (export "deploy")) ) "#, - Err("validation of new code failed") + Err("Can't load the module into wasmi!") ); prepare_test!( @@ -902,13 +556,16 @@ mod tests { ); } - mod tables { + mod imports { use super::*; prepare_test!( - no_tables, + can_import_legit_function, r#" (module + (import "seal0" "nop" (func (param i64))) + (import "env" "memory" (memory 1 1)) + (func (export "call")) (func (export "deploy")) ) @@ -916,69 +573,41 @@ mod tests { Ok(_) ); + // memory is in "env" and not in "seal0" prepare_test!( - table_valid_size, + memory_not_in_seal0, r#" (module - (table 3 funcref) + (import "seal0" "memory" (memory 1 1)) (func (export "call")) (func (export "deploy")) ) "#, - Ok(_) + Err("Invalid module for imported memory") ); + // Memory is in "env" and not in some arbitrary module prepare_test!( - table_too_big, + memory_not_in_arbitrary_module, r#" (module - (table 4 funcref) - - (func (export "call")) - (func (export "deploy")) - )"#, - Err("table exceeds maximum size allowed") - ); + (import "any_module" "memory" (memory 1 1)) - prepare_test!( - br_table_valid_size, - r#" - (module (func (export "call")) (func (export "deploy")) - (func - i32.const 0 
- br_table 0 0 0 0 - ) ) "#, - Ok(_) + Err("Invalid module for imported memory") ); prepare_test!( - br_table_too_big, + function_in_other_module_works, r#" (module - (func (export "call")) - (func (export "deploy")) - (func - i32.const 0 - br_table 0 0 0 0 0 - ) - )"#, - Err("BrTable's immediate value is too big.") - ); - } - - mod imports { - use super::*; + (import "seal1" "nop" (func (param i32))) + (import "env" "memory" (memory 1 1)) - prepare_test!( - can_import_legit_function, - r#" - (module - (import "seal0" "nop" (func (param i64))) (func (export "call")) (func (export "deploy")) @@ -987,55 +616,57 @@ mod tests { Ok(_) ); - // even though gas is defined the contract can't import it since - // it is an implementation defined. prepare_test!( - can_not_import_gas_function, + wrong_signature, r#" (module - (import "seal0" "gas" (func (param i32))) + (import "seal0" "input" (func (param i64))) + (import "env" "memory" (memory 1 1)) (func (export "call")) (func (export "deploy")) ) "#, - Err("module imports a banned function") + Err("New code rejected on wasmi instantiation!") ); - // memory is in "env" and not in "seal0" prepare_test!( - memory_not_in_seal0, + unknown_func_name, r#" (module - (import "seal0" "memory" (memory 1 1)) + (import "seal0" "unknown_func" (func)) (func (export "call")) (func (export "deploy")) ) "#, - Err("Invalid module for imported memory") + Err("No memory import found in the module") ); - // memory is in "env" and not in some arbitrary module + // Try to import function from not a "seal*" module. 
prepare_test!( - memory_not_in_arbitrary_module, + try_import_from_wrong_module, r#" (module - (import "any_module" "memory" (memory 1 1)) + (import "env" "panic" (func)) + (import "env" "memory" (memory 1 1)) (func (export "call")) (func (export "deploy")) ) "#, - Err("Invalid module for imported memory") + Err("New code rejected on wasmi instantiation!") ); + } + + mod entrypoints { + use super::*; prepare_test!( - function_in_other_module_works, + it_works, r#" (module - (import "seal1" "nop" (func (param i32))) - + (import "env" "memory" (memory 1 1)) (func (export "call")) (func (export "deploy")) ) @@ -1043,46 +674,31 @@ mod tests { Ok(_) ); - // wrong signature prepare_test!( - wrong_signature, + signed_extension_works, r#" (module - (import "seal0" "gas" (func (param i64))) - - (func (export "call")) + (import "env" "memory" (memory 1 1)) (func (export "deploy")) - ) - "#, - Err("module imports a banned function") - ); - - prepare_test!( - unknown_func_name, - r#" - (module - (import "seal0" "unknown_func" (func)) - (func (export "call")) - (func (export "deploy")) + (func (param i32) (result i32) + local.get 0 + i32.extend8_s + ) ) "#, - Err("new code rejected after instrumentation") + Ok(_) ); - } - - mod entrypoints { - use super::*; prepare_test!( - it_works, + omit_memory, r#" (module (func (export "call")) (func (export "deploy")) ) "#, - Ok(_) + Err("No memory import found in the module") ); prepare_test!( @@ -1106,21 +722,23 @@ mod tests { ); // Try to use imported function as an entry point. + // This is allowed. prepare_test!( try_sneak_export_as_entrypoint, r#" (module (import "seal0" "panic" (func)) + (import "env" "memory" (memory 1 1)) (func (export "deploy")) (export "call" (func 0)) ) "#, - Err("entry point points to an imported function") + Ok(_) ); - // Try to use imported function as an entry point. + // Try to use global as an entry point. 
prepare_test!( try_sneak_export_as_global, r#" @@ -1129,7 +747,7 @@ mod tests { (global (export "call") i32 (i32.const 0)) ) "#, - Err("expected a function") + Err("global export is forbidden") ); prepare_test!( @@ -1152,7 +770,7 @@ mod tests { (func (export "whatevs")) ) "#, - Err("unknown export: expecting only deploy and call functions") + Err("unknown function export: expecting only deploy and call functions") ); prepare_test!( @@ -1164,7 +782,7 @@ mod tests { (func (export "deploy")) ) "#, - Err("validation of new code failed") + Err("Can't load the module into wasmi!") ); prepare_test!( @@ -1176,7 +794,7 @@ mod tests { (func (export "deploy")) ) "#, - Err("validation of new code failed") + Err("Can't load the module into wasmi!") ); prepare_test!( @@ -1188,7 +806,7 @@ mod tests { (func (export "deploy")) ) "#, - Err("validation of new code failed") + Err("Can't load the module into wasmi!") ); prepare_test!( @@ -1200,7 +818,7 @@ mod tests { (func (export "deploy")) ) "#, - Err("validation of new code failed") + Err("Can't load the module into wasmi!") ); } } diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index ae02e5badbf39..9d29da70a87d9 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -67,7 +67,6 @@ pub trait Environment { } /// Type of a storage key. -#[allow(dead_code)] enum KeyType { /// Legacy fix sized key `[u8;32]`. Fix, @@ -174,9 +173,6 @@ impl HostError for TrapReason {} #[cfg_attr(test, derive(Debug, PartialEq, Eq))] #[derive(Copy, Clone)] pub enum RuntimeCosts { - /// Charge the gas meter with the cost of a metering block. The charged costs are - /// the supplied cost of the block plus the overhead of the metering itself. - MeteringBlock(u64), /// Weight charged for copying data from the sandbox. CopyFromContract(u32), /// Weight charged for copying data to the sandbox. 
@@ -271,13 +267,16 @@ pub enum RuntimeCosts { AccountEntranceCount, /// Weight of calling `instantiation_nonce` InstantationNonce, + /// Weight of calling `add_delegate_dependency` + AddDelegateDependency, + /// Weight of calling `remove_delegate_dependency` + RemoveDelegateDependency, } impl RuntimeCosts { fn token(&self, s: &HostFnWeights) -> RuntimeToken { use self::RuntimeCosts::*; let weight = match *self { - MeteringBlock(amount) => s.gas.saturating_add(Weight::from_parts(amount, 0)), CopyFromContract(len) => s.return_per_byte.saturating_mul(len.into()), CopyToContract(len) => s.input_per_byte.saturating_mul(len.into()), Caller => s.caller, @@ -353,6 +352,8 @@ impl RuntimeCosts { ReentrantCount => s.reentrance_count, AccountEntranceCount => s.account_reentrance_count, InstantationNonce => s.instantiation_nonce, + AddDelegateDependency => s.add_delegate_dependency, + RemoveDelegateDependency => s.remove_delegate_dependency, }; RuntimeToken { #[cfg(test)] @@ -369,7 +370,7 @@ impl RuntimeCosts { macro_rules! charge_gas { ($runtime:expr, $costs:expr) => {{ let token = $costs.token(&$runtime.ext.schedule().host_fn_weights); - $runtime.ext.gas_meter().charge(token) + $runtime.ext.gas_meter_mut().charge(token) }}; } @@ -485,25 +486,40 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { /// Converts the sandbox result and the runtime state into the execution outcome. pub fn to_execution_result(self, sandbox_result: Result<(), wasmi::Error>) -> ExecResult { + use wasmi::core::TrapCode::OutOfFuel; use TrapReason::*; + match sandbox_result { // Contract returned from main function -> no data was returned. Ok(_) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), + // `OutOfGas` when host asks engine to consume more than left in the _store_. + // We should never get this case, as gas meter is being charged (and hence raises error) + // first. 
+ Err(wasmi::Error::Store(_)) => Err(Error::<E::T>::OutOfGas.into()), // Contract either trapped or some host function aborted the execution. Err(wasmi::Error::Trap(trap)) => { + if let Some(OutOfFuel) = trap.trap_code() { + // `OutOfGas` during engine execution. + return Err(Error::<E::T>::OutOfGas.into()) + } // If we encoded a reason then it is some abort generated by a host function. - // Otherwise the trap came from the contract. - let reason: TrapReason = trap.downcast().ok_or(Error::<E::T>::ContractTrapped)?; - match reason { - Return(ReturnData { flags, data }) => { - let flags = - ReturnFlags::from_bits(flags).ok_or(Error::<E::T>::InvalidCallFlags)?; - Ok(ExecReturnValue { flags, data }) - }, - Termination => - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), - SupervisorError(error) => return Err(error.into()), + if let Some(reason) = &trap.downcast_ref::<TrapReason>() { + match &reason { + Return(ReturnData { flags, data }) => { + let flags = ReturnFlags::from_bits(*flags) + .ok_or(Error::<E::T>::InvalidCallFlags)?; + return Ok(ExecReturnValue { flags, data: data.to_vec() }) + }, + Termination => + return Ok(ExecReturnValue { + flags: ReturnFlags::empty(), + data: Vec::new(), + }), + SupervisorError(error) => return Err((*error).into()), + } } + // Otherwise the trap came from the contract itself. + Err(Error::<E::T>::ContractTrapped.into()) }, // Any other error is returned only if instantiation or linking failed (i.e. // wasm binary tried to import a function that is not provided by the host). @@ -536,7 +552,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { /// refunded to match the actual amount. pub fn adjust_gas(&mut self, charged: ChargedAmount, actual_costs: RuntimeCosts) { let token = actual_costs.token(&self.ext.schedule().host_fn_weights); - self.ext.gas_meter().adjust_gas(charged, token); + self.ext.gas_meter_mut().adjust_gas(charged, token); } /// Read designated chunk from the sandbox memory. 
@@ -1004,18 +1020,6 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { // for every function. #[define_env(doc)] pub mod env { - /// Account for used gas. Traps if gas used is greater than gas limit. - /// - /// NOTE: This is a implementation defined call and is NOT a part of the public API. - /// This call is supposed to be called only by instrumentation injected code. - /// It deals only with the *ref_time* Weight. - /// - /// - `amount`: How much gas is used. - fn gas(ctx: _, _memory: _, amount: u64) -> Result<(), TrapReason> { - ctx.charge_gas(RuntimeCosts::MeteringBlock(amount))?; - Ok(()) - } - /// Set the value at the given key in the contract storage. /// /// Equivalent to the newer [`seal1`][`super::api_doc::Version1::set_storage`] version with the @@ -1575,9 +1579,6 @@ pub mod env { /// length to `output_len_ptr`. The copy of the output buffer and address can be skipped by /// supplying the sentinel value of `SENTINEL` to `output_ptr` or `address_ptr`. /// - /// `value` must be at least the minimum balance. Otherwise the instantiation fails and the - /// contract is not created. - /// /// # Parameters /// /// - `code_hash_ptr`: a pointer to the buffer that contains the initializer code. @@ -2093,7 +2094,7 @@ pub mod env { /// `out_ptr`. This call overwrites it with the size of the value. If the available /// space at `out_ptr` is less than the size of the value a trap is triggered. /// - /// The data is encoded as (T::Hash, T::BlockNumber). + /// The data is encoded as (T::Hash, frame_system::pallet_prelude::BlockNumberFor::<T>). /// /// # Changes from v0 /// @@ -2819,9 +2820,34 @@ pub mod env { /// /// The nonce is incremented for each successful contract instantiation. This is a /// sensible default salt for contract instantiations. - #[unstable] fn instantiation_nonce(ctx: _, _memory: _) -> Result<u64, TrapReason> { ctx.charge_gas(RuntimeCosts::InstantationNonce)?; Ok(ctx.ext.nonce()) } + + /// Adds a new delegate dependency to the contract. 
+ /// + /// # Parameters + /// + /// - `code_hash_ptr`: A pointer to the code hash of the dependency. + #[unstable] + fn add_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { + ctx.charge_gas(RuntimeCosts::AddDelegateDependency)?; + let code_hash = ctx.read_sandbox_memory_as(memory, code_hash_ptr)?; + ctx.ext.add_delegate_dependency(code_hash)?; + Ok(()) + } + + /// Removes the delegate dependency from the contract. + /// + /// # Parameters + /// + /// - `code_hash_ptr`: A pointer to the code hash of the dependency. + #[unstable] + fn remove_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { + ctx.charge_gas(RuntimeCosts::RemoveDelegateDependency)?; + let code_hash = ctx.read_sandbox_memory_as(memory, code_hash_ptr)?; + ctx.ext.remove_delegate_dependency(&code_hash)?; + Ok(()) + } } diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index 0a7f3ddf1ca4f..d8f873b0615be 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -15,25 +15,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_contracts +//! Autogenerated weights for `pallet_contracts` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-07-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-ynta1nyy-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate +// target/production/substrate-node // benchmark // pallet // --steps=50 // --repeat=20 // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json // --pallet=pallet_contracts // --chain=dev // --header=./HEADER-APACHE2 @@ -48,11 +47,21 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_contracts. +/// Weight functions needed for `pallet_contracts`. pub trait WeightInfo { fn on_process_deletion_queue_batch() -> Weight; fn on_initialize_per_trie_key(k: u32, ) -> Weight; - fn reinstrument(c: u32, ) -> Weight; + fn v9_migration_step(c: u32, ) -> Weight; + fn v10_migration_step() -> Weight; + fn v11_migration_step(k: u32, ) -> Weight; + fn v12_migration_step(c: u32, ) -> Weight; + fn v13_migration_step() -> Weight; + fn v14_migration_step() -> Weight; + fn migration_noop() -> Weight; + fn migrate() -> Weight; + fn on_runtime_upgrade_noop() -> Weight; + fn on_runtime_upgrade_in_progress() -> Weight; + fn on_runtime_upgrade() -> Weight; fn call_with_code_per_byte(c: u32, ) -> Weight; fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight; fn instantiate(i: u32, s: u32, ) -> Weight; @@ -74,7 +83,6 @@ pub trait WeightInfo { fn seal_block_number(r: u32, ) -> Weight; fn seal_now(r: u32, ) -> Weight; fn seal_weight_to_fee(r: u32, ) -> Weight; - fn seal_gas(r: u32, ) -> Weight; fn seal_input(r: u32, ) -> Weight; fn seal_input_per_byte(n: u32, ) -> Weight; fn seal_return(r: u32, ) -> Weight; @@ -115,1559 +123,1870 @@ pub trait WeightInfo { fn seal_ecdsa_recover(r: u32, ) -> Weight; fn seal_ecdsa_to_eth_address(r: u32, ) -> 
Weight; fn seal_set_code_hash(r: u32, ) -> Weight; + fn add_delegate_dependency(r: u32, ) -> Weight; + fn remove_delegate_dependency(r: u32, ) -> Weight; fn seal_reentrance_count(r: u32, ) -> Weight; fn seal_account_reentrance_count(r: u32, ) -> Weight; fn seal_instantiation_nonce(r: u32, ) -> Weight; fn instr_i64const(r: u32, ) -> Weight; - fn instr_i64load(r: u32, ) -> Weight; - fn instr_i64store(r: u32, ) -> Weight; - fn instr_select(r: u32, ) -> Weight; - fn instr_if(r: u32, ) -> Weight; - fn instr_br(r: u32, ) -> Weight; - fn instr_br_if(r: u32, ) -> Weight; - fn instr_br_table(r: u32, ) -> Weight; - fn instr_br_table_per_entry(e: u32, ) -> Weight; - fn instr_call(r: u32, ) -> Weight; - fn instr_call_indirect(r: u32, ) -> Weight; - fn instr_call_per_local(l: u32, ) -> Weight; - fn instr_local_get(r: u32, ) -> Weight; - fn instr_local_set(r: u32, ) -> Weight; - fn instr_local_tee(r: u32, ) -> Weight; - fn instr_global_get(r: u32, ) -> Weight; - fn instr_global_set(r: u32, ) -> Weight; - fn instr_memory_current(r: u32, ) -> Weight; - fn instr_memory_grow(r: u32, ) -> Weight; - fn instr_i64clz(r: u32, ) -> Weight; - fn instr_i64ctz(r: u32, ) -> Weight; - fn instr_i64popcnt(r: u32, ) -> Weight; - fn instr_i64eqz(r: u32, ) -> Weight; - fn instr_i64extendsi32(r: u32, ) -> Weight; - fn instr_i64extendui32(r: u32, ) -> Weight; - fn instr_i32wrapi64(r: u32, ) -> Weight; - fn instr_i64eq(r: u32, ) -> Weight; - fn instr_i64ne(r: u32, ) -> Weight; - fn instr_i64lts(r: u32, ) -> Weight; - fn instr_i64ltu(r: u32, ) -> Weight; - fn instr_i64gts(r: u32, ) -> Weight; - fn instr_i64gtu(r: u32, ) -> Weight; - fn instr_i64les(r: u32, ) -> Weight; - fn instr_i64leu(r: u32, ) -> Weight; - fn instr_i64ges(r: u32, ) -> Weight; - fn instr_i64geu(r: u32, ) -> Weight; - fn instr_i64add(r: u32, ) -> Weight; - fn instr_i64sub(r: u32, ) -> Weight; - fn instr_i64mul(r: u32, ) -> Weight; - fn instr_i64divs(r: u32, ) -> Weight; - fn instr_i64divu(r: u32, ) -> Weight; - fn instr_i64rems(r: 
u32, ) -> Weight; - fn instr_i64remu(r: u32, ) -> Weight; - fn instr_i64and(r: u32, ) -> Weight; - fn instr_i64or(r: u32, ) -> Weight; - fn instr_i64xor(r: u32, ) -> Weight; - fn instr_i64shl(r: u32, ) -> Weight; - fn instr_i64shrs(r: u32, ) -> Weight; - fn instr_i64shru(r: u32, ) -> Weight; - fn instr_i64rotl(r: u32, ) -> Weight; - fn instr_i64rotr(r: u32, ) -> Weight; } -/// Weights for pallet_contracts using the Substrate node and recommended hardware. +/// Weights for `pallet_contracts` using the Substrate node and recommended hardware. pub struct SubstrateWeight<T>(PhantomData<T>); impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { - /// Storage: Contracts DeletionQueueCounter (r:1 w:0) - /// Proof: Contracts DeletionQueueCounter (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) + /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:0) + /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) fn on_process_deletion_queue_batch() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `1594` - // Minimum execution time: 2_627_000 picoseconds. - Weight::from_parts(2_748_000, 1594) + // Measured: `142` + // Estimated: `1627` + // Minimum execution time: 2_527_000 picoseconds. + Weight::from_parts(2_651_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `488 + k * (69 ±0)` - // Estimated: `478 + k * (70 ±0)` - // Minimum execution time: 13_607_000 picoseconds. 
- Weight::from_parts(8_026_118, 478) - // Standard Error: 1_323 - .saturating_add(Weight::from_parts(980_583, 0).saturating_mul(k.into())) + // Measured: `451 + k * (69 ±0)` + // Estimated: `441 + k * (70 ±0)` + // Minimum execution time: 13_291_000 picoseconds. + Weight::from_parts(13_825_000, 441) + // Standard Error: 1_137 + .saturating_add(Weight::from_parts(1_244_309, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(k.into())) } - /// Storage: Contracts PristineCode (r:1 w:0) - /// Proof: Contracts PristineCode (max_values: None, max_size: Some(125988), added: 128463, mode: Measured) - /// Storage: Contracts CodeStorage (r:0 w:1) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// The range of component `c` is `[0, 61717]`. - fn reinstrument(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `238 + c * (1 ±0)` - // Estimated: `3708 + c * (1 ±0)` - // Minimum execution time: 30_563_000 picoseconds. - Weight::from_parts(22_292_544, 3708) - // Standard Error: 60 - .saturating_add(Weight::from_parts(54_541, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:2 w:1) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:2 w:1) + /// The range of component `c` is `[0, 125952]`. + fn v9_migration_step(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `211 + c * (1 ±0)` + // Estimated: `6149 + c * (1 ±0)` + // Minimum execution time: 8_359_000 picoseconds. 
+ Weight::from_parts(9_179_121, 6149) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_312, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) + } + /// Storage: `Contracts::ContractInfoOf` (r:3 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + fn v10_migration_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `709` + // Estimated: `9124` + // Minimum execution time: 42_457_000 picoseconds. + Weight::from_parts(44_556_000, 9124) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Contracts::DeletionQueue` (r:1 w:1025) + /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) + /// Storage: `Contracts::DeletionQueueCounter` (r:0 w:1) + /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// The range of component `k` is `[0, 1024]`. + fn v11_migration_step(k: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `171 + k * (1 ±0)` + // Estimated: `3635 + k * (1 ±0)` + // Minimum execution time: 3_839_000 picoseconds. 
+ Weight::from_parts(3_462_337, 3635) + // Standard Error: 1_384 + .saturating_add(Weight::from_parts(1_166_522, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(k.into())) + } + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// The range of component `c` is `[0, 125952]`. + fn v12_migration_step(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `325 + c * (1 ±0)` + // Estimated: `6263 + c * (1 ±0)` + // Minimum execution time: 17_001_000 picoseconds. 
+ Weight::from_parts(17_095_380, 6263) + // Standard Error: 1 + .saturating_add(Weight::from_parts(411, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::ContractInfoOf` (r:3 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + fn v13_migration_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `639` + // Estimated: `9054` + // Minimum execution time: 35_342_000 picoseconds. 
+ Weight::from_parts(36_839_000, 9054) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + fn v14_migration_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `260` + // Estimated: `6200` + // Minimum execution time: 27_995_000 picoseconds. + Weight::from_parts(28_661_000, 6200) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + fn migration_noop() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `1627` + // Minimum execution time: 3_272_000 picoseconds. + Weight::from_parts(3_553_000, 1627) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:1) + fn migrate() -> Weight { + // Proof Size summary in bytes: + // Measured: `166` + // Estimated: `3631` + // Minimum execution time: 12_788_000 picoseconds. 
+ Weight::from_parts(13_163_000, 3631) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) + fn on_runtime_upgrade_noop() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 4_794_000 picoseconds. + Weight::from_parts(5_086_000, 3607) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + fn on_runtime_upgrade_in_progress() -> Weight { + // Proof Size summary in bytes: + // Measured: `167` + // Estimated: `3632` + // Minimum execution time: 6_616_000 picoseconds. + Weight::from_parts(7_034_000, 3632) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + fn on_runtime_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 6_985_000 picoseconds. 
+ Weight::from_parts(7_477_000, 3607) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `707` - // Estimated: `6656 + c * (1 ±0)` - // Minimum execution time: 268_884_000 picoseconds. - Weight::from_parts(277_799_331, 6656) - // Standard Error: 23 - .saturating_add(Weight::from_parts(37_876, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `783` + // Estimated: `6732 + c * (1 ±0)` + // Minimum execution time: 306_088_000 picoseconds. 
+ Weight::from_parts(268_361_911, 6732) + // Standard Error: 76 + .saturating_add(Weight::from_parts(38_334, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } - /// Storage: Contracts OwnerInfoOf (r:1 w:1) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: Contracts Nonce (r:1 w:1) - /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: System EventTopics (r:3 w:3) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// Storage: Contracts CodeStorage (r:0 w:1) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Contracts PristineCode (r:0 w:1) - /// Proof: Contracts PristineCode (max_values: None, max_size: Some(125988), added: 128463, mode: Measured) - /// The range of component `c` is `[0, 61717]`. 
+ /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `System::EventTopics` (r:3 w:3) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:0 w:1) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// The range of component `c` is `[0, 125952]`. /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `270` - // Estimated: `8659` - // Minimum execution time: 3_159_921_000 picoseconds. 
- Weight::from_parts(594_826_134, 8659) - // Standard Error: 290 - .saturating_add(Weight::from_parts(106_471, 0).saturating_mul(c.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(1_160, 0).saturating_mul(i.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(1_417, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) - .saturating_add(T::DbWeight::get().writes(10_u64)) - } - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Contracts Nonce (r:1 w:1) - /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:1 w:1) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + // Measured: `303` + // Estimated: `8745` + // Minimum execution time: 4_224_657_000 picoseconds. 
+ Weight::from_parts(451_557_864, 8745) + // Standard Error: 216 + .saturating_add(Weight::from_parts(111_761, 0).saturating_mul(c.into())) + // Standard Error: 26 + .saturating_add(Weight::from_parts(1_794, 0).saturating_mul(i.into())) + // Standard Error: 26 + .saturating_add(Weight::from_parts(2_013, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().writes(9_u64)) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `482` - // Estimated: `6408` - // Minimum execution time: 1_653_811_000 picoseconds. 
- Weight::from_parts(296_038_081, 6408) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_461, 0).saturating_mul(i.into())) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_430, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `527` + // Estimated: `6517` + // Minimum execution time: 2_029_313_000 picoseconds. + Weight::from_parts(353_077_600, 6517) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_781, 0).saturating_mul(i.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_729, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 
w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `759` - // Estimated: `6699` - // Minimum execution time: 195_916_000 picoseconds. - Weight::from_parts(196_706_000, 6699) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `817` + // Estimated: `6757` + // Minimum execution time: 204_086_000 picoseconds. + Weight::from_parts(216_738_000, 6757) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Contracts OwnerInfoOf (r:1 w:1) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:1 w:1) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// Storage: Contracts CodeStorage (r:0 w:1) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Contracts PristineCode (r:0 w:1) - /// Proof: Contracts PristineCode (max_values: None, max_size: Some(125988), added: 128463, mode: Measured) - /// The range of component `c` is `[0, 61717]`. 
+ /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `System::EventTopics` (r:1 w:1) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:0 w:1) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// The range of component `c` is `[0, 125952]`. fn upload_code(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 251_137_000 picoseconds. - Weight::from_parts(252_985_435, 3574) - // Standard Error: 88 - .saturating_add(Weight::from_parts(108_141, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 269_337_000 picoseconds. 
+ Weight::from_parts(220_186_006, 3607) + // Standard Error: 106 + .saturating_add(Weight::from_parts(74_291, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Contracts OwnerInfoOf (r:1 w:1) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:1 w:1) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// Storage: Contracts CodeStorage (r:0 w:1) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Contracts PristineCode (r:0 w:1) - /// Proof: Contracts PristineCode (max_values: None, max_size: Some(125988), added: 128463, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `System::EventTopics` (r:1 w:1) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:0 w:1) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: - // Measured: `255` - // Estimated: `3720` - // Minimum execution time: 33_521_000 picoseconds. - Weight::from_parts(34_039_000, 3720) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `259` + // Estimated: `3724` + // Minimum execution time: 35_127_000 picoseconds. 
+ Weight::from_parts(36_180_000, 3724) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:2 w:2) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:3 w:3) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `System::EventTopics` (r:3 w:3) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `570` - // Estimated: `8985` - // Minimum execution time: 33_477_000 picoseconds. - Weight::from_parts(33_890_000, 8985) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `576` + // Estimated: `8991` + // Minimum execution time: 37_550_000 picoseconds. 
+ Weight::from_parts(39_149_000, 8991) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of 
component `r` is `[0, 1600]`. fn seal_caller(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `781 + r * (6 ±0)` - // Estimated: `6722 + r * (6 ±0)` - // Minimum execution time: 239_374_000 picoseconds. - Weight::from_parts(246_017_099, 6722) - // Standard Error: 539 - .saturating_add(Weight::from_parts(323_826, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `857 + r * (6 ±0)` + // Estimated: `6798 + r * (6 ±0)` + // Minimum execution time: 269_991_000 picoseconds. + Weight::from_parts(293_993_592, 6798) + // Standard Error: 665 + .saturating_add(Weight::from_parts(343_796, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1601 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) + /// Proof: `Contracts::ContractInfoOf` 
(`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_is_contract(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `839 + r * (240 ±0)` - // Estimated: `6743 + r * (2715 ±0)` - // Minimum execution time: 240_656_000 picoseconds. - Weight::from_parts(87_361_934, 6743) - // Standard Error: 5_912 - .saturating_add(Weight::from_parts(3_329_840, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `924 + r * (232 ±0)` + // Estimated: `6831 + r * (2707 ±0)` + // Minimum execution time: 274_151_000 picoseconds. 
+ Weight::from_parts(83_529_206, 6831) + // Standard Error: 8_452 + .saturating_add(Weight::from_parts(3_534_024, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2715).saturating_mul(r.into())) - } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1601 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(Weight::from_parts(0, 2707).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), 
added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `831 + r * (244 ±0)` - // Estimated: `6747 + r * (2719 ±0)` - // Minimum execution time: 243_026_000 picoseconds. - Weight::from_parts(76_953_007, 6747) - // Standard Error: 6_640 - .saturating_add(Weight::from_parts(4_132_521, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `910 + r * (236 ±0)` + // Estimated: `6835 + r * (2711 ±0)` + // Minimum execution time: 276_689_000 picoseconds. + Weight::from_parts(110_268_281, 6835) + // Standard Error: 8_106 + .saturating_add(Weight::from_parts(4_376_136, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2719).saturating_mul(r.into())) - } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: 
Measured) + .saturating_add(Weight::from_parts(0, 2711).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_own_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `788 + r * (6 ±0)` - // Estimated: `6730 + r * (6 ±0)` - // Minimum execution time: 242_736_000 picoseconds. - Weight::from_parts(243_136_007, 6730) - // Standard Error: 912 - .saturating_add(Weight::from_parts(414_717, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `864 + r * (6 ±0)` + // Estimated: `6806 + r * (6 ±0)` + // Minimum execution time: 274_079_000 picoseconds. 
+ Weight::from_parts(282_258_090, 6806) + // Standard Error: 1_343 + .saturating_add(Weight::from_parts(464_680, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: 
`Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `778 + r * (3 ±0)` - // Estimated: `6723 + r * (3 ±0)` - // Minimum execution time: 240_130_000 picoseconds. - Weight::from_parts(244_517_187, 6723) - // Standard Error: 384 - .saturating_add(Weight::from_parts(167_431, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `854 + r * (3 ±0)` + // Estimated: `6799 + r * (3 ±0)` + // Minimum execution time: 270_960_000 picoseconds. + Weight::from_parts(281_985_584, 6799) + // Standard Error: 378 + .saturating_add(Weight::from_parts(184_462, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: 
`Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `668 + r * (3 ±0)` - // Estimated: `6608 + r * (3 ±0)` - // Minimum execution time: 228_022_000 picoseconds. - Weight::from_parts(232_385_198, 6608) - // Standard Error: 300 - .saturating_add(Weight::from_parts(145_143, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Measured: `744 + r * (3 ±0)` + // Estimated: `6684 + r * (3 ±0)` + // Minimum execution time: 244_835_000 picoseconds. 
+ Weight::from_parts(270_660_753, 6684) + // Standard Error: 390 + .saturating_add(Weight::from_parts(164_232, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: 
`Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `782 + r * (6 ±0)` - // Estimated: `6724 + r * (6 ±0)` - // Minimum execution time: 240_250_000 picoseconds. - Weight::from_parts(240_268_824, 6724) - // Standard Error: 945 - .saturating_add(Weight::from_parts(329_577, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `858 + r * (6 ±0)` + // Estimated: `6800 + r * (6 ±0)` + // Minimum execution time: 273_269_000 picoseconds. + Weight::from_parts(274_468_168, 6800) + // Standard Error: 2_246 + .saturating_add(Weight::from_parts(386_838, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` 
(`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `778 + r * (6 ±0)` - // Estimated: `6719 + r * (6 ±0)` - // Minimum execution time: 242_370_000 picoseconds. - Weight::from_parts(242_389_500, 6719) - // Standard Error: 712 - .saturating_add(Weight::from_parts(518_380, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `854 + r * (6 ±0)` + // Estimated: `6795 + r * (6 ±0)` + // Minimum execution time: 275_244_000 picoseconds. 
+ Weight::from_parts(281_299_739, 6795) + // Standard Error: 2_890 + .saturating_add(Weight::from_parts(600_498, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:2 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: 
`Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `922 + r * (6 ±0)` - // Estimated: `6846 + r * (6 ±0)` - // Minimum execution time: 238_563_000 picoseconds. - Weight::from_parts(253_511_314, 6846) - // Standard Error: 1_571 - .saturating_add(Weight::from_parts(1_454_089, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `998 + r * (6 ±0)` + // Estimated: `6922 + r * (6 ±0)` + // Minimum execution time: 271_540_000 picoseconds. + Weight::from_parts(298_456_935, 6922) + // Standard Error: 2_881 + .saturating_add(Weight::from_parts(1_719_337, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: 
`System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_value_transferred(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `792 + r * (6 ±0)` - // Estimated: `6741 + r * (6 ±0)` - // Minimum execution time: 242_995_000 picoseconds. - Weight::from_parts(240_061_456, 6741) - // Standard Error: 2_650 - .saturating_add(Weight::from_parts(326_813, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `868 + r * (6 ±0)` + // Estimated: `6817 + r * (6 ±0)` + // Minimum execution time: 274_832_000 picoseconds. 
+ Weight::from_parts(286_078_648, 6817) + // Standard Error: 695 + .saturating_add(Weight::from_parts(345_045, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: 
`Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `790 + r * (6 ±0)` - // Estimated: `6739 + r * (6 ±0)` - // Minimum execution time: 241_342_000 picoseconds. - Weight::from_parts(240_875_314, 6739) - // Standard Error: 669 - .saturating_add(Weight::from_parts(324_519, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `866 + r * (6 ±0)` + // Estimated: `6815 + r * (6 ±0)` + // Minimum execution time: 267_337_000 picoseconds. + Weight::from_parts(283_693_170, 6815) + // Standard Error: 580 + .saturating_add(Weight::from_parts(345_350, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: 
`System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `787 + r * (6 ±0)` - // Estimated: `6737 + r * (6 ±0)` - // Minimum execution time: 238_954_000 picoseconds. - Weight::from_parts(242_269_896, 6737) - // Standard Error: 1_453 - .saturating_add(Weight::from_parts(317_998, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `863 + r * (6 ±0)` + // Estimated: `6813 + r * (6 ±0)` + // Minimum execution time: 276_313_000 picoseconds. 
+ Weight::from_parts(287_689_703, 6813) + // Standard Error: 1_251 + .saturating_add(Weight::from_parts(342_536, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: 
`Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `778 + r * (6 ±0)` - // Estimated: `6723 + r * (6 ±0)` - // Minimum execution time: 240_935_000 picoseconds. - Weight::from_parts(242_938_271, 6723) - // Standard Error: 792 - .saturating_add(Weight::from_parts(316_782, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `854 + r * (6 ±0)` + // Estimated: `6799 + r * (6 ±0)` + // Minimum execution time: 274_196_000 picoseconds. + Weight::from_parts(288_641_687, 6799) + // Standard Error: 530 + .saturating_add(Weight::from_parts(336_194, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) - /// Proof: TransactionPayment NextFeeMultiplier (max_values: Some(1), max_size: Some(16), added: 511, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: 
`Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_weight_to_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `852 + r * (14 ±0)` - // Estimated: `6785 + r * (14 ±0)` - // Minimum execution time: 240_142_000 picoseconds. - Weight::from_parts(241_386_730, 6785) - // Standard Error: 2_116 - .saturating_add(Weight::from_parts(1_387_202, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `928 + r * (14 ±0)` + // Estimated: `6861 + r * (14 ±0)` + // Minimum execution time: 254_997_000 picoseconds. 
+ Weight::from_parts(292_260_891, 6861) + // Standard Error: 1_019 + .saturating_add(Weight::from_parts(1_447_021, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1600]`. - fn seal_gas(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `745 + r * (4 ±0)` - // Estimated: `6687 + r * (4 ±0)` - // Minimum execution time: 165_617_000 picoseconds. 
- Weight::from_parts(170_794_127, 6687) - // Standard Error: 209 - .saturating_add(Weight::from_parts(127_931, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(r.into())) - } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, 
mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_input(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `780 + r * (6 ±0)` - // Estimated: `6724 + r * (6 ±0)` - // Minimum execution time: 238_832_000 picoseconds. - Weight::from_parts(237_110_694, 6724) - // Standard Error: 539 - .saturating_add(Weight::from_parts(280_610, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `856 + r * (6 ±0)` + // Estimated: `6800 + r * (6 ±0)` + // Minimum execution time: 272_720_000 picoseconds. + Weight::from_parts(287_125_181, 6800) + // Standard Error: 491 + .saturating_add(Weight::from_parts(294_488, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` 
(`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_input_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `784` - // Estimated: `6724` - // Minimum execution time: 241_070_000 picoseconds. - Weight::from_parts(242_162_279, 6724) - // Standard Error: 1 - .saturating_add(Weight::from_parts(595, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `860` + // Estimated: `6800` + // Minimum execution time: 280_665_000 picoseconds. 
+ Weight::from_parts(233_022_448, 6800) + // Standard Error: 23 + .saturating_add(Weight::from_parts(996, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: 
`System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `768 + r * (45 ±0)` - // Estimated: `6708 + r * (45 ±0)` - // Minimum execution time: 236_337_000 picoseconds. - Weight::from_parts(238_883_828, 6708) - // Standard Error: 188_978 - .saturating_add(Weight::from_parts(926_671, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `844 + r * (45 ±0)` + // Estimated: `6784 + r * (45 ±0)` + // Minimum execution time: 250_335_000 picoseconds. + Weight::from_parts(278_774_071, 6784) + // Standard Error: 873_509 + .saturating_add(Weight::from_parts(4_562_628, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: 
`Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_return_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `778` - // Estimated: `6731` - // Minimum execution time: 239_103_000 picoseconds. - Weight::from_parts(240_382_910, 6731) + // Measured: `854` + // Estimated: `6807` + // Minimum execution time: 278_402_000 picoseconds. 
+ Weight::from_parts(285_491_021, 6807) // Standard Error: 0 - .saturating_add(Weight::from_parts(181, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(Weight::from_parts(312, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: System Account (r:4 w:4) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts DeletionQueueCounter (r:1 w:1) - /// Proof: Contracts DeletionQueueCounter (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:1 w:1) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:3 w:3) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// Storage: Contracts DeletionQueue (r:0 w:1) - /// Proof: Contracts DeletionQueue (max_values: None, max_size: Some(142), added: 2617, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:4 w:4) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, 
`max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) + /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:3 w:3) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Contracts::DeletionQueue` (r:0 w:1) + /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `810 + r * (356 ±0)` - // Estimated: `6750 + r * (7781 ±0)` - // Minimum execution time: 238_739_000 picoseconds. - Weight::from_parts(241_041_330, 6750) - // Standard Error: 176_820 - .saturating_add(Weight::from_parts(115_332_869, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `2963 + r * (400 ±0)` + // Estimated: `8903 + r * (7825 ±0)` + // Minimum execution time: 281_030_000 picoseconds. 
+ Weight::from_parts(305_435_226, 8903) + // Standard Error: 816_824 + .saturating_add(Weight::from_parts(131_691_873, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(T::DbWeight::get().writes((8_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 7781).saturating_mul(r.into())) - } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) - /// Proof: RandomnessCollectiveFlip RandomMaterial (max_values: Some(1), max_size: Some(2594), added: 3089, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(T::DbWeight::get().writes((9_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 7825).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, 
`max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) + /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_random(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `825 + r * (10 ±0)` - // Estimated: `6769 + r * (10 ±0)` - // Minimum execution time: 240_888_000 picoseconds. - Weight::from_parts(259_901_113, 6769) - // Standard Error: 5_935 - .saturating_add(Weight::from_parts(1_764_269, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `935 + r * (10 ±0)` + // Estimated: `6876 + r * (10 ±0)` + // Minimum execution time: 261_369_000 picoseconds. 
+ Weight::from_parts(300_458_315, 6876) + // Standard Error: 3_506 + .saturating_add(Weight::from_parts(1_971_733, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, 
mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_deposit_event(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `778 + r * (10 ±0)` - // Estimated: `6723 + r * (10 ±0)` - // Minimum execution time: 237_478_000 picoseconds. - Weight::from_parts(264_915_436, 6723) - // Standard Error: 4_644 - .saturating_add(Weight::from_parts(3_452_918, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `854 + r * (10 ±0)` + // Estimated: `6799 + r * (10 ±0)` + // Minimum execution time: 262_894_000 picoseconds. + Weight::from_parts(285_321_838, 6799) + // Standard Error: 6_585 + .saturating_add(Weight::from_parts(3_998_744, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:6 w:6) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// 
Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:6 w:6) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16384]`. fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `797 + t * (32 ±0)` - // Estimated: `6744 + t * (2508 ±0)` - // Minimum execution time: 255_720_000 picoseconds. - Weight::from_parts(247_945_758, 6744) - // Standard Error: 73_390 - .saturating_add(Weight::from_parts(2_483_239, 0).saturating_mul(t.into())) - // Standard Error: 20 - .saturating_add(Weight::from_parts(756, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `873 + t * (32 ±0)` + // Estimated: `6820 + t * (2508 ±0)` + // Minimum execution time: 275_909_000 picoseconds. 
+ Weight::from_parts(289_251_568, 6820) + // Standard Error: 94_431 + .saturating_add(Weight::from_parts(3_007_409, 0).saturating_mul(t.into())) + // Standard Error: 26 + .saturating_add(Weight::from_parts(815, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2508).saturating_mul(t.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) 
+ /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_debug_message(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `777 + r * (7 ±0)` - // Estimated: `6721 + r * (7 ±0)` - // Minimum execution time: 172_214_000 picoseconds. - Weight::from_parts(177_306_567, 6721) - // Standard Error: 839 - .saturating_add(Weight::from_parts(230_558, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `853 + r * (7 ±0)` + // Estimated: `6797 + r * (7 ±0)` + // Minimum execution time: 168_482_000 picoseconds. + Weight::from_parts(178_065_606, 6797) + // Standard Error: 371 + .saturating_add(Weight::from_parts(242_851, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: MaxEncodedLen) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: MaxEncodedLen) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: 
None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `MaxEncodedLen`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `MaxEncodedLen`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `125728` - // Estimated: `131670` - // Minimum execution time: 354_105_000 picoseconds. - Weight::from_parts(360_649_854, 131670) - // Standard Error: 2 - .saturating_add(Weight::from_parts(737, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `125804` + // Estimated: `131746` + // Minimum execution time: 407_401_000 picoseconds. 
+ Weight::from_parts(426_585_443, 131746) + // Standard Error: 22 + .saturating_add(Weight::from_parts(986, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_set_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `845 + r * (292 ±0)` - // Estimated: `843 + r * (293 ±0)` - // Minimum execution time: 239_637_000 picoseconds. - Weight::from_parts(136_431_436, 843) - // Standard Error: 10_238 - .saturating_add(Weight::from_parts(6_070_221, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `921 + r * (292 ±0)` + // Estimated: `919 + r * (293 ±0)` + // Minimum execution time: 275_800_000 picoseconds. + Weight::from_parts(161_230_700, 919) + // Standard Error: 12_908 + .saturating_add(Weight::from_parts(6_965_844, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 293).saturating_mul(r.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. 
fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1304` - // Estimated: `1280` - // Minimum execution time: 256_198_000 picoseconds. - Weight::from_parts(289_972_802, 1280) - // Standard Error: 54 - .saturating_add(Weight::from_parts(438, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `1380` + // Estimated: `1356` + // Minimum execution time: 289_258_000 picoseconds. + Weight::from_parts(334_318_402, 1356) + // Standard Error: 59 + .saturating_add(Weight::from_parts(808, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1167 + n * (1 ±0)` - // Estimated: `1167 + n * (1 ±0)` - // Minimum execution time: 255_519_000 picoseconds. - Weight::from_parts(257_668_217, 1167) - // Standard Error: 19 - .saturating_add(Weight::from_parts(105, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `1243 + n * (1 ±0)` + // Estimated: `1243 + n * (1 ±0)` + // Minimum execution time: 277_874_000 picoseconds. 
+ Weight::from_parts(303_956_600, 1243) + // Standard Error: 33 + .saturating_add(Weight::from_parts(58, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_clear_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `841 + r * (288 ±0)` - // Estimated: `845 + r * (289 ±0)` - // Minimum execution time: 239_461_000 picoseconds. - Weight::from_parts(131_630_528, 845) - // Standard Error: 10_483 - .saturating_add(Weight::from_parts(5_910_066, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `917 + r * (288 ±0)` + // Estimated: `921 + r * (289 ±0)` + // Minimum execution time: 255_230_000 picoseconds. + Weight::from_parts(163_226_984, 921) + // Standard Error: 12_691 + .saturating_add(Weight::from_parts(6_808_905, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. 
fn seal_clear_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1163 + n * (1 ±0)` - // Estimated: `1163 + n * (1 ±0)` - // Minimum execution time: 254_904_000 picoseconds. - Weight::from_parts(261_213_399, 1163) - // Standard Error: 178 - .saturating_add(Weight::from_parts(125, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `1239 + n * (1 ±0)` + // Estimated: `1239 + n * (1 ±0)` + // Minimum execution time: 275_780_000 picoseconds. + Weight::from_parts(301_967_262, 1239) + // Standard Error: 34 + .saturating_add(Weight::from_parts(128, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_get_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `835 + r * (296 ±0)` - // Estimated: `840 + r * (297 ±0)` - // Minimum execution time: 239_995_000 picoseconds. - Weight::from_parts(151_326_508, 840) - // Standard Error: 8_960 - .saturating_add(Weight::from_parts(4_937_728, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `911 + r * (296 ±0)` + // Estimated: `916 + r * (297 ±0)` + // Minimum execution time: 279_295_000 picoseconds. 
+ Weight::from_parts(208_289_066, 916) + // Standard Error: 8_330 + .saturating_add(Weight::from_parts(5_600_713, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_get_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1179 + n * (1 ±0)` - // Estimated: `1179 + n * (1 ±0)` - // Minimum execution time: 254_515_000 picoseconds. - Weight::from_parts(256_728_817, 1179) - // Standard Error: 22 - .saturating_add(Weight::from_parts(706, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `1255 + n * (1 ±0)` + // Estimated: `1255 + n * (1 ±0)` + // Minimum execution time: 276_745_000 picoseconds. + Weight::from_parts(298_824_233, 1255) + // Standard Error: 36 + .saturating_add(Weight::from_parts(717, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. 
fn seal_contains_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `856 + r * (288 ±0)` - // Estimated: `857 + r * (289 ±0)` - // Minimum execution time: 240_601_000 picoseconds. - Weight::from_parts(154_476_561, 857) - // Standard Error: 8_872 - .saturating_add(Weight::from_parts(4_805_043, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `932 + r * (288 ±0)` + // Estimated: `933 + r * (289 ±0)` + // Minimum execution time: 275_137_000 picoseconds. + Weight::from_parts(196_695_898, 933) + // Standard Error: 9_207 + .saturating_add(Weight::from_parts(5_466_071, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_contains_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1166 + n * (1 ±0)` - // Estimated: `1166 + n * (1 ±0)` - // Minimum execution time: 253_654_000 picoseconds. - Weight::from_parts(257_288_586, 1166) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `1242 + n * (1 ±0)` + // Estimated: `1242 + n * (1 ±0)` + // Minimum execution time: 269_315_000 picoseconds. 
+ Weight::from_parts(296_795_271, 1242) + // Standard Error: 39 + .saturating_add(Weight::from_parts(242, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_take_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `829 + r * (296 ±0)` - // Estimated: `836 + r * (297 ±0)` - // Minimum execution time: 239_869_000 picoseconds. - Weight::from_parts(135_258_204, 836) - // Standard Error: 10_378 - .saturating_add(Weight::from_parts(6_144_770, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `905 + r * (296 ±0)` + // Estimated: `912 + r * (297 ±0)` + // Minimum execution time: 256_406_000 picoseconds. + Weight::from_parts(156_850_288, 912) + // Standard Error: 12_496 + .saturating_add(Weight::from_parts(7_055_305, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. 
fn seal_take_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1180 + n * (1 ±0)` - // Estimated: `1180 + n * (1 ±0)` - // Minimum execution time: 258_153_000 picoseconds. - Weight::from_parts(260_068_186, 1180) - // Standard Error: 25 - .saturating_add(Weight::from_parts(744, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `1256 + n * (1 ±0)` + // Estimated: `1256 + n * (1 ±0)` + // Minimum execution time: 280_297_000 picoseconds. + Weight::from_parts(302_241_752, 1256) + // Standard Error: 34 + .saturating_add(Weight::from_parts(748, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: System Account (r:1602 w:1601) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1602 w:1601) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: 
None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_transfer(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1373 + r * (45 ±0)` - // Estimated: `7270 + r * (2520 ±0)` - // Minimum execution time: 243_189_000 picoseconds. - Weight::from_parts(243_465_000, 7270) - // Standard Error: 30_961 - .saturating_add(Weight::from_parts(35_376_623, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `1449 + r * (45 ±0)` + // Estimated: `7346 + r * (2520 ±0)` + // Minimum execution time: 274_834_000 picoseconds. 
+ Weight::from_parts(176_977_557, 7346) + // Standard Error: 32_386 + .saturating_add(Weight::from_parts(39_393_162, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:801 w:801) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:2 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:803 w:803) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:2 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: 
Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:803 w:803) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1140 + r * (276 ±0)` - // Estimated: `9332 + r * (2752 ±0)` - // Minimum execution time: 243_656_000 picoseconds. - Weight::from_parts(244_221_000, 9332) - // Standard Error: 69_762 - .saturating_add(Weight::from_parts(216_905_619, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `1304 + r * (268 ±0)` + // Estimated: `9485 + r * (2744 ±0)` + // Minimum execution time: 279_802_000 picoseconds. + Weight::from_parts(287_995_000, 9485) + // Standard Error: 99_110 + .saturating_add(Weight::from_parts(245_521_843, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2752).saturating_mul(r.into())) - } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:736 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System 
EventTopics (r:737 w:737) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(Weight::from_parts(0, 2744).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:736 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:736 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:737 w:737) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + r * (502 ±0)` - // Estimated: `6727 + r * (2572 ±10)` - // Minimum execution time: 242_632_000 picoseconds. - Weight::from_parts(243_068_000, 6727) - // Standard Error: 126_218 - .saturating_add(Weight::from_parts(213_096_291, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into()))) + // Measured: `0 + r * (576 ±0)` + // Estimated: `6803 + r * (2637 ±3)` + // Minimum execution time: 273_435_000 picoseconds. 
+ Weight::from_parts(276_865_000, 6803) + // Standard Error: 148_051 + .saturating_add(Weight::from_parts(244_660_274, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2572).saturating_mul(r.into())) - } - /// Storage: System Account (r:3 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:2 w:2) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:2 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:4 w:4) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(Weight::from_parts(0, 2637).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:3 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:2 w:0) + /// 
Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:4 w:4) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1048576]`. fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1154 + t * (204 ±0)` - // Estimated: `12044 + t * (5154 ±0)` - // Minimum execution time: 421_691_000 picoseconds. - Weight::from_parts(394_587_369, 12044) - // Standard Error: 1_104_014 - .saturating_add(Weight::from_parts(30_461_758, 0).saturating_mul(t.into())) - // Standard Error: 1 - .saturating_add(Weight::from_parts(601, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(10_u64)) + // Measured: `1322 + t * (310 ±0)` + // Estimated: `12212 + t * (5260 ±0)` + // Minimum execution time: 477_593_000 picoseconds. 
+ Weight::from_parts(69_887_451, 12212) + // Standard Error: 11_764_606 + .saturating_add(Weight::from_parts(373_361_977, 0).saturating_mul(t.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_000, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 5154).saturating_mul(t.into())) - } - /// Storage: System Account (r:1602 w:1602) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:801 w:801) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:801 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts Nonce (r:1 w:1) - /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:800 w:800) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:802 w:802) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(Weight::from_parts(0, 5260).saturating_mul(t.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1602 w:1602) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 
2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:801 w:800) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:801 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:802 w:802) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1322 + r * (254 ±0)` - // Estimated: `7146 + r * (5205 ±0)` - // Minimum execution time: 581_252_000 picoseconds. - Weight::from_parts(582_275_000, 7146) - // Standard Error: 279_771 - .saturating_add(Weight::from_parts(349_770_967, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `1380 + r * (255 ±0)` + // Estimated: `7204 + r * (5206 ±0)` + // Minimum execution time: 652_387_000 picoseconds. 
+ Weight::from_parts(658_670_000, 7204) + // Standard Error: 363_054 + .saturating_add(Weight::from_parts(395_547_049, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 5205).saturating_mul(r.into())) - } - /// Storage: System Account (r:4 w:4) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:2 w:2) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:2 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts Nonce (r:1 w:1) - /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:1 w:1) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:3 w:3) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:4 w:4) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) + /// Proof: `Contracts::ContractInfoOf` 
(`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:2 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:2 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:3 w:3) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1071 + t * (187 ±0)` - // Estimated: `9492 + t * (2634 ±2)` - // Minimum execution time: 1_623_241_000 picoseconds. - Weight::from_parts(317_076_173, 9492) - // Standard Error: 4_549_416 - .saturating_add(Weight::from_parts(125_360_446, 0).saturating_mul(t.into())) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_183, 0).saturating_mul(i.into())) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_352, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(13_u64)) + // Measured: `1233 + t * (156 ±0)` + // Estimated: `9663 + t * (2578 ±2)` + // Minimum execution time: 2_299_620_000 picoseconds. 
+ Weight::from_parts(1_274_859_063, 9663) + // Standard Error: 12_129_871 + .saturating_add(Weight::from_parts(16_608_792, 0).saturating_mul(t.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_014, 0).saturating_mul(i.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_180, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(10_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2634).saturating_mul(t.into())) - } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(Weight::from_parts(0, 2578).saturating_mul(t.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: 
`Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `777 + r * (8 ±0)` - // Estimated: `6718 + r * (8 ±0)` - // Minimum execution time: 238_262_000 picoseconds. - Weight::from_parts(243_093_288, 6718) - // Standard Error: 870 - .saturating_add(Weight::from_parts(573_939, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `853 + r * (8 ±0)` + // Estimated: `6794 + r * (8 ±0)` + // Minimum execution time: 267_959_000 picoseconds. 
+ Weight::from_parts(282_967_946, 6794) + // Standard Error: 624 + .saturating_add(Weight::from_parts(402_344, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: 
`Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `785` - // Estimated: `6725` - // Minimum execution time: 239_888_000 picoseconds. - Weight::from_parts(242_849_333, 6725) - // Standard Error: 3 - .saturating_add(Weight::from_parts(3_949, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `861` + // Estimated: `6801` + // Minimum execution time: 274_585_000 picoseconds. + Weight::from_parts(272_480_647, 6801) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_089, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: 
`Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `779 + r * (8 ±0)` - // Estimated: `6721 + r * (8 ±0)` - // Minimum execution time: 237_288_000 picoseconds. - Weight::from_parts(242_510_631, 6721) - // Standard Error: 977 - .saturating_add(Weight::from_parts(742_726, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `855 + r * (8 ±0)` + // Estimated: `6797 + r * (8 ±0)` + // Minimum execution time: 268_346_000 picoseconds. 
+ Weight::from_parts(284_168_231, 6797) + // Standard Error: 620 + .saturating_add(Weight::from_parts(805_038, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: 
`Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `787` - // Estimated: `6729` - // Minimum execution time: 240_006_000 picoseconds. - Weight::from_parts(233_802_510, 6729) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_161, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `863` + // Estimated: `6805` + // Minimum execution time: 273_073_000 picoseconds. + Weight::from_parts(280_346_065, 6805) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_357, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: 
`Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `779 + r * (8 ±0)` - // Estimated: `6724 + r * (8 ±0)` - // Minimum execution time: 237_532_000 picoseconds. - Weight::from_parts(243_087_565, 6724) - // Standard Error: 656 - .saturating_add(Weight::from_parts(417_850, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `855 + r * (8 ±0)` + // Estimated: `6800 + r * (8 ±0)` + // Minimum execution time: 263_072_000 picoseconds. 
+ Weight::from_parts(284_487_433, 6800) + // Standard Error: 668 + .saturating_add(Weight::from_parts(458_763, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: 
`Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `787` - // Estimated: `6733` - // Minimum execution time: 241_429_000 picoseconds. - Weight::from_parts(233_528_258, 6733) + // Measured: `863` + // Estimated: `6809` + // Minimum execution time: 271_488_000 picoseconds. + Weight::from_parts(273_877_727, 6809) // Standard Error: 1 - .saturating_add(Weight::from_parts(913, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// 
Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `779 + r * (8 ±0)` - // Estimated: `6725 + r * (8 ±0)` - // Minimum execution time: 237_622_000 picoseconds. - Weight::from_parts(240_476_401, 6725) - // Standard Error: 795 - .saturating_add(Weight::from_parts(416_869, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `855 + r * (8 ±0)` + // Estimated: `6801 + r * (8 ±0)` + // Minimum execution time: 271_365_000 picoseconds. 
+ Weight::from_parts(285_100_883, 6801) + // Standard Error: 651 + .saturating_add(Weight::from_parts(462_754, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: 
`Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `787` - // Estimated: `6727` - // Minimum execution time: 241_134_000 picoseconds. - Weight::from_parts(234_043_271, 6727) - // Standard Error: 3 - .saturating_add(Weight::from_parts(919, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `863` + // Estimated: `6803` + // Minimum execution time: 272_341_000 picoseconds. + Weight::from_parts(275_388_470, 6803) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_192, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: 
`Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `912 + n * (1 ±0)` - // Estimated: `6849 + n * (1 ±0)` - // Minimum execution time: 292_699_000 picoseconds. - Weight::from_parts(301_523_608, 6849) - // Standard Error: 14 - .saturating_add(Weight::from_parts(4_676, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `988 + n * (1 ±0)` + // Estimated: `6925 + n * (1 ±0)` + // Minimum execution time: 341_302_000 picoseconds. 
+ Weight::from_parts(354_111_630, 6925) + // Standard Error: 8 + .saturating_add(Weight::from_parts(5_913, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: 
`Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `727 + r * (112 ±0)` - // Estimated: `6666 + r * (112 ±0)` - // Minimum execution time: 241_126_000 picoseconds. - Weight::from_parts(248_796_458, 6666) - // Standard Error: 21_501 - .saturating_add(Weight::from_parts(48_091_265, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `804 + r * (112 ±0)` + // Estimated: `6742 + r * (112 ±0)` + // Minimum execution time: 275_325_000 picoseconds. + Weight::from_parts(333_041_903, 6742) + // Standard Error: 11_171 + .saturating_add(Weight::from_parts(56_605_218, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// 
Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `822 + r * (76 ±0)` - // Estimated: `6717 + r * (77 ±0)` - // Minimum execution time: 242_379_000 picoseconds. - Weight::from_parts(261_355_525, 6717) - // Standard Error: 18_862 - .saturating_add(Weight::from_parts(37_603_073, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `898 + r * (76 ±0)` + // Estimated: `6793 + r * (77 ±0)` + // Minimum execution time: 274_165_000 picoseconds. 
+ Weight::from_parts(347_487_800, 6793) + // Standard Error: 15_398 + .saturating_add(Weight::from_parts(46_072_020, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, 
mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `792 + r * (42 ±0)` - // Estimated: `6731 + r * (42 ±0)` - // Minimum execution time: 241_270_000 picoseconds. - Weight::from_parts(245_135_291, 6731) - // Standard Error: 10_757 - .saturating_add(Weight::from_parts(9_344_876, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `868 + r * (42 ±0)` + // Estimated: `6807 + r * (42 ±0)` + // Minimum execution time: 270_855_000 picoseconds. + Weight::from_parts(320_777_105, 6807) + // Standard Error: 11_106 + .saturating_add(Weight::from_parts(12_053_053, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1536 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:1536 w:1536) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:1538 w:1538) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// 
Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1536 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:1538 w:1538) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + r * (964 ±0)` - // Estimated: `8190 + r * (3090 ±7)` - // Minimum execution time: 240_506_000 picoseconds. - Weight::from_parts(241_653_000, 8190) - // Standard Error: 46_785 - .saturating_add(Weight::from_parts(22_107_816, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `0 + r * (965 ±0)` + // Estimated: `6798 + r * (3090 ±10)` + // Minimum execution time: 257_732_000 picoseconds. 
+ Weight::from_parts(280_982_000, 6798) + // Standard Error: 68_194 + .saturating_add(Weight::from_parts(27_413_991, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 3090).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 
128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `r` is `[0, 32]`. + fn add_delegate_dependency(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `918 + r * (132 ±0)` + // Estimated: `6870 + r * (2606 ±0)` + // Minimum execution time: 278_285_000 picoseconds. + Weight::from_parts(298_012_554, 6870) + // Standard Error: 24_160 + .saturating_add(Weight::from_parts(6_363_118, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2606).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `MaxEncodedLen`) + /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `MaxEncodedLen`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: 
Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `r` is `[0, 32]`. + fn remove_delegate_dependency(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `965 + r * (183 ±0)` + // Estimated: `129453 + r * (2568 ±0)` + // Minimum execution time: 258_198_000 picoseconds. + Weight::from_parts(290_090_206, 129453) + // Standard Error: 19_792 + .saturating_add(Weight::from_parts(6_004_811, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2568).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: 
`System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `773 + r * (3 ±0)` - // Estimated: `6723 + r * (3 ±0)` - // Minimum execution time: 241_539_000 picoseconds. - Weight::from_parts(245_471_045, 6723) - // Standard Error: 416 - .saturating_add(Weight::from_parts(159_577, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `849 + r * (3 ±0)` + // Estimated: `6799 + r * (3 ±0)` + // Minimum execution time: 263_315_000 picoseconds. + Weight::from_parts(284_093_748, 6799) + // Standard Error: 371 + .saturating_add(Weight::from_parts(176_949, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: 
`Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_account_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1975 + r * (39 ±0)` - // Estimated: `7805 + r * (40 ±0)` - // Minimum execution time: 242_702_000 picoseconds. - Weight::from_parts(274_518_595, 7805) - // Standard Error: 1_138 - .saturating_add(Weight::from_parts(256_973, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `2082 + r * (39 ±0)` + // Estimated: `7886 + r * (40 ±0)` + // Minimum execution time: 274_583_000 picoseconds. 
+ Weight::from_parts(352_081_486, 7886) + // Standard Error: 1_799 + .saturating_add(Weight::from_parts(313_433, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts Nonce (r:1 w:1) - /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: 
`Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_instantiation_nonce(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `776 + r * (3 ±0)` - // Estimated: `6723 + r * (3 ±0)` - // Minimum execution time: 239_360_000 picoseconds. - Weight::from_parts(245_990_810, 6723) - // Standard Error: 3_188 - .saturating_add(Weight::from_parts(143_408, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `852 + r * (3 ±0)` + // Estimated: `6799 + r * (3 ±0)` + // Minimum execution time: 267_291_000 picoseconds. + Weight::from_parts(287_500_540, 6799) + // Standard Error: 393 + .saturating_add(Weight::from_parts(152_587, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -1676,2009 +1995,1868 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_617_000 picoseconds. - Weight::from_parts(1_900_268, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(2_950, 0).saturating_mul(r.into())) + // Minimum execution time: 1_440_000 picoseconds. + Weight::from_parts(1_656_631, 0) + // Standard Error: 13 + .saturating_add(Weight::from_parts(10_486, 0).saturating_mul(r.into())) } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64load(r: u32, ) -> Weight { +} + +// For backwards compatibility and tests. 
+impl WeightInfo for () { + /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:0) + /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + fn on_process_deletion_queue_batch() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_739_000 picoseconds. - Weight::from_parts(2_109_373, 0) - // Standard Error: 43 - .saturating_add(Weight::from_parts(6_586, 0).saturating_mul(r.into())) + // Measured: `142` + // Estimated: `1627` + // Minimum execution time: 2_527_000 picoseconds. + Weight::from_parts(2_651_000, 1627) + .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64store(r: u32, ) -> Weight { + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `k` is `[0, 1024]`. + fn on_initialize_per_trie_key(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_726_000 picoseconds. - Weight::from_parts(2_268_507, 0) - // Standard Error: 4 - .saturating_add(Weight::from_parts(6_022, 0).saturating_mul(r.into())) + // Measured: `451 + k * (69 ±0)` + // Estimated: `441 + k * (70 ±0)` + // Minimum execution time: 13_291_000 picoseconds. + Weight::from_parts(13_825_000, 441) + // Standard Error: 1_137 + .saturating_add(Weight::from_parts(1_244_309, 0).saturating_mul(k.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) + .saturating_add(Weight::from_parts(0, 70).saturating_mul(k.into())) } - /// The range of component `r` is `[0, 5000]`. 
- fn instr_select(r: u32, ) -> Weight { + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:2 w:1) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:2 w:1) + /// The range of component `c` is `[0, 125952]`. + fn v9_migration_step(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_628_000 picoseconds. - Weight::from_parts(2_042_521, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(7_935, 0).saturating_mul(r.into())) + // Measured: `211 + c * (1 ±0)` + // Estimated: `6149 + c * (1 ±0)` + // Minimum execution time: 8_359_000 picoseconds. + Weight::from_parts(9_179_121, 6149) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_312, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } - /// The range of component `r` is `[0, 5000]`. - fn instr_if(r: u32, ) -> Weight { + /// Storage: `Contracts::ContractInfoOf` (r:3 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + fn v10_migration_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `709` + // Estimated: `9124` + // Minimum execution time: 42_457_000 picoseconds. 
+ Weight::from_parts(44_556_000, 9124) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Contracts::DeletionQueue` (r:1 w:1025) + /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) + /// Storage: `Contracts::DeletionQueueCounter` (r:0 w:1) + /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// The range of component `k` is `[0, 1024]`. + fn v11_migration_step(k: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `171 + k * (1 ±0)` + // Estimated: `3635 + k * (1 ±0)` + // Minimum execution time: 3_839_000 picoseconds. + Weight::from_parts(3_462_337, 3635) + // Standard Error: 1_384 + .saturating_add(Weight::from_parts(1_166_522, 0).saturating_mul(k.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(k.into())) + } + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// The range of component `c` is `[0, 125952]`. 
+ fn v12_migration_step(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_648_000 picoseconds. - Weight::from_parts(1_902_691, 0) - // Standard Error: 7 - .saturating_add(Weight::from_parts(10_572, 0).saturating_mul(r.into())) + // Measured: `325 + c * (1 ±0)` + // Estimated: `6263 + c * (1 ±0)` + // Minimum execution time: 17_001_000 picoseconds. + Weight::from_parts(17_095_380, 6263) + // Standard Error: 1 + .saturating_add(Weight::from_parts(411, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } - /// The range of component `r` is `[0, 5000]`. - fn instr_br(r: u32, ) -> Weight { + /// Storage: `Contracts::ContractInfoOf` (r:3 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + fn v13_migration_step() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_626_000 picoseconds. - Weight::from_parts(1_891_843, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(4_612, 0).saturating_mul(r.into())) + // Measured: `639` + // Estimated: `9054` + // Minimum execution time: 35_342_000 picoseconds. + Weight::from_parts(36_839_000, 9054) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// The range of component `r` is `[0, 5000]`. 
- fn instr_br_if(r: u32, ) -> Weight { + /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + fn v14_migration_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `260` + // Estimated: `6200` + // Minimum execution time: 27_995_000 picoseconds. + Weight::from_parts(28_661_000, 6200) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + fn migration_noop() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `1627` + // Minimum execution time: 3_272_000 picoseconds. + Weight::from_parts(3_553_000, 1627) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:1) + fn migrate() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_581_000 picoseconds. 
- Weight::from_parts(1_139_823, 0) - // Standard Error: 74 - .saturating_add(Weight::from_parts(8_008, 0).saturating_mul(r.into())) + // Measured: `166` + // Estimated: `3631` + // Minimum execution time: 12_788_000 picoseconds. + Weight::from_parts(13_163_000, 3631) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// The range of component `r` is `[0, 5000]`. - fn instr_br_table(r: u32, ) -> Weight { + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) + fn on_runtime_upgrade_noop() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_591_000 picoseconds. - Weight::from_parts(1_258_400, 0) - // Standard Error: 34 - .saturating_add(Weight::from_parts(9_706, 0).saturating_mul(r.into())) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 4_794_000 picoseconds. + Weight::from_parts(5_086_000, 3607) + .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// The range of component `e` is `[1, 256]`. - fn instr_br_table_per_entry(e: u32, ) -> Weight { + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + fn on_runtime_upgrade_in_progress() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_701_000 picoseconds. 
- Weight::from_parts(1_876_118, 0) - // Standard Error: 23 - .saturating_add(Weight::from_parts(4, 0).saturating_mul(e.into())) + // Measured: `167` + // Estimated: `3632` + // Minimum execution time: 6_616_000 picoseconds. + Weight::from_parts(7_034_000, 3632) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } - /// The range of component `r` is `[0, 5000]`. - fn instr_call(r: u32, ) -> Weight { + /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + fn on_runtime_upgrade() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_617_000 picoseconds. - Weight::from_parts(1_565_613, 0) - // Standard Error: 629 - .saturating_add(Weight::from_parts(19_575, 0).saturating_mul(r.into())) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 6_985_000 picoseconds. + Weight::from_parts(7_477_000, 3607) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// The range of component `r` is `[0, 5000]`. 
- fn instr_call_indirect(r: u32, ) -> Weight { + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `c` is `[0, 125952]`. + fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_875_000 picoseconds. - Weight::from_parts(4_549_584, 0) - // Standard Error: 278 - .saturating_add(Weight::from_parts(24_336, 0).saturating_mul(r.into())) + // Measured: `783` + // Estimated: `6732 + c * (1 ±0)` + // Minimum execution time: 306_088_000 picoseconds. + Weight::from_parts(268_361_911, 6732) + // Standard Error: 76 + .saturating_add(Weight::from_parts(38_334, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } - /// The range of component `l` is `[0, 1024]`. 
- fn instr_call_per_local(l: u32, ) -> Weight { + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `System::EventTopics` (r:3 w:3) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:0 w:1) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// The range of component `c` is `[0, 125952]`. + /// The range of component `i` is `[0, 1048576]`. + /// The range of component `s` is `[0, 1048576]`. + fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_742_000 picoseconds. - Weight::from_parts(2_087_387, 0) + // Measured: `303` + // Estimated: `8745` + // Minimum execution time: 4_224_657_000 picoseconds. 
+ Weight::from_parts(451_557_864, 8745) + // Standard Error: 216 + .saturating_add(Weight::from_parts(111_761, 0).saturating_mul(c.into())) // Standard Error: 26 - .saturating_add(Weight::from_parts(1_041, 0).saturating_mul(l.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_local_get(r: u32, ) -> Weight { + .saturating_add(Weight::from_parts(1_794, 0).saturating_mul(i.into())) + // Standard Error: 26 + .saturating_add(Weight::from_parts(2_013, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(10_u64)) + .saturating_add(RocksDbWeight::get().writes(9_u64)) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1048576]`. + /// The range of component `s` is `[0, 1048576]`. 
+ fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_861_000 picoseconds. - Weight::from_parts(3_552_428, 0) - // Standard Error: 23 - .saturating_add(Weight::from_parts(2_339, 0).saturating_mul(r.into())) + // Measured: `527` + // Estimated: `6517` + // Minimum execution time: 2_029_313_000 picoseconds. + Weight::from_parts(353_077_600, 6517) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_781, 0).saturating_mul(i.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_729, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(10_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// The range of component `r` is `[0, 5000]`. - fn instr_local_set(r: u32, ) -> Weight { + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn call() -> Weight { // Proof Size summary 
in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_866_000 picoseconds. - Weight::from_parts(3_151_948, 0) - // Standard Error: 4 - .saturating_add(Weight::from_parts(3_667, 0).saturating_mul(r.into())) + // Measured: `817` + // Estimated: `6757` + // Minimum execution time: 204_086_000 picoseconds. + Weight::from_parts(216_738_000, 6757) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// The range of component `r` is `[0, 5000]`. - fn instr_local_tee(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_919_000 picoseconds. - Weight::from_parts(3_214_587, 0) - // Standard Error: 5 - .saturating_add(Weight::from_parts(3_867, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_global_get(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_764_000 picoseconds. - Weight::from_parts(1_815_683, 0) - // Standard Error: 123 - .saturating_add(Weight::from_parts(8_733, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_global_set(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_783_000 picoseconds. - Weight::from_parts(2_437_152, 0) - // Standard Error: 13 - .saturating_add(Weight::from_parts(8_839, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_memory_current(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_745_000 picoseconds. - Weight::from_parts(2_018_078, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_756, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 16]`. 
- fn instr_memory_grow(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_648_000 picoseconds. - Weight::from_parts(648_059, 0) - // Standard Error: 142_299 - .saturating_add(Weight::from_parts(13_313_060, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64clz(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_652_000 picoseconds. - Weight::from_parts(1_953_179, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_828, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64ctz(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_607_000 picoseconds. - Weight::from_parts(1_924_759, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_762, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64popcnt(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_687_000 picoseconds. - Weight::from_parts(1_959_683, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_754, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64eqz(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_641_000 picoseconds. - Weight::from_parts(1_975_838, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_681, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64extendsi32(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_689_000 picoseconds. 
- Weight::from_parts(1_980_109, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_880, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64extendui32(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_671_000 picoseconds. - Weight::from_parts(1_912_089, 0) - // Standard Error: 29 - .saturating_add(Weight::from_parts(3_896, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i32wrapi64(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_643_000 picoseconds. - Weight::from_parts(1_951_485, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_725, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64eq(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_649_000 picoseconds. - Weight::from_parts(1_937_598, 0) - // Standard Error: 4 - .saturating_add(Weight::from_parts(6_045, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64ne(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_651_000 picoseconds. - Weight::from_parts(2_202_977, 0) - // Standard Error: 313 - .saturating_add(Weight::from_parts(6_299, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64lts(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_589_000 picoseconds. - Weight::from_parts(1_946_304, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(6_019, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. 
- fn instr_i64ltu(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_614_000 picoseconds. - Weight::from_parts(1_933_375, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(6_020, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64gts(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_678_000 picoseconds. - Weight::from_parts(2_003_850, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(5_816, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64gtu(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_651_000 picoseconds. - Weight::from_parts(1_971_321, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(6_114, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64les(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_647_000 picoseconds. - Weight::from_parts(2_017_232, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(5_990, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64leu(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_635_000 picoseconds. - Weight::from_parts(3_232_848, 0) - // Standard Error: 105 - .saturating_add(Weight::from_parts(5_816, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64ges(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_623_000 picoseconds. 
- Weight::from_parts(1_996_165, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(5_964, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64geu(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_668_000 picoseconds. - Weight::from_parts(1_973_238, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(6_021, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64add(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_674_000 picoseconds. - Weight::from_parts(1_981_762, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(5_898, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64sub(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_632_000 picoseconds. - Weight::from_parts(1_935_700, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(6_154, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64mul(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_607_000 picoseconds. - Weight::from_parts(1_942_734, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(5_797, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64divs(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_611_000 picoseconds. - Weight::from_parts(2_960_454, 0) - // Standard Error: 177 - .saturating_add(Weight::from_parts(11_666, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. 
- fn instr_i64divu(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_641_000 picoseconds. - Weight::from_parts(2_104_200, 0) - // Standard Error: 5 - .saturating_add(Weight::from_parts(10_540, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64rems(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_643_000 picoseconds. - Weight::from_parts(2_602_908, 0) - // Standard Error: 24 - .saturating_add(Weight::from_parts(11_900, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64remu(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_584_000 picoseconds. - Weight::from_parts(2_056_817, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(10_722, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64and(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_652_000 picoseconds. - Weight::from_parts(1_988_892, 0) - // Standard Error: 4 - .saturating_add(Weight::from_parts(5_683, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64or(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_660_000 picoseconds. - Weight::from_parts(2_148_537, 0) - // Standard Error: 38 - .saturating_add(Weight::from_parts(5_756, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64xor(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_629_000 picoseconds. 
- Weight::from_parts(1_955_010, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(5_931, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64shl(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_569_000 picoseconds. - Weight::from_parts(1_982_403, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(5_867, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64shrs(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_615_000 picoseconds. - Weight::from_parts(1_989_920, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(6_137, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64shru(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_646_000 picoseconds. - Weight::from_parts(2_020_935, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(5_863, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64rotl(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_661_000 picoseconds. - Weight::from_parts(2_320_710, 0) - // Standard Error: 27 - .saturating_add(Weight::from_parts(5_922, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64rotr(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_674_000 picoseconds. 
- Weight::from_parts(2_044_188, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(5_855, 0).saturating_mul(r.into())) - } -} - -// For backwards compatibility and tests -impl WeightInfo for () { - /// Storage: Contracts DeletionQueueCounter (r:1 w:0) - /// Proof: Contracts DeletionQueueCounter (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - fn on_process_deletion_queue_batch() -> Weight { - // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `1594` - // Minimum execution time: 2_627_000 picoseconds. - Weight::from_parts(2_748_000, 1594) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) - /// The range of component `k` is `[0, 1024]`. - fn on_initialize_per_trie_key(k: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `488 + k * (69 ±0)` - // Estimated: `478 + k * (70 ±0)` - // Minimum execution time: 13_607_000 picoseconds. - Weight::from_parts(8_026_118, 478) - // Standard Error: 1_323 - .saturating_add(Weight::from_parts(980_583, 0).saturating_mul(k.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) - .saturating_add(Weight::from_parts(0, 70).saturating_mul(k.into())) - } - /// Storage: Contracts PristineCode (r:1 w:0) - /// Proof: Contracts PristineCode (max_values: None, max_size: Some(125988), added: 128463, mode: Measured) - /// Storage: Contracts CodeStorage (r:0 w:1) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// The range of component `c` is `[0, 61717]`. 
- fn reinstrument(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `238 + c * (1 ±0)` - // Estimated: `3708 + c * (1 ±0)` - // Minimum execution time: 30_563_000 picoseconds. - Weight::from_parts(22_292_544, 3708) - // Standard Error: 60 - .saturating_add(Weight::from_parts(54_541, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) - } - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `System::EventTopics` (r:1 w:1) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:0 w:1) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. 
- fn call_with_code_per_byte(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `707` - // Estimated: `6656 + c * (1 ±0)` - // Minimum execution time: 268_884_000 picoseconds. - Weight::from_parts(277_799_331, 6656) - // Standard Error: 23 - .saturating_add(Weight::from_parts(37_876, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) - } - /// Storage: Contracts OwnerInfoOf (r:1 w:1) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: Contracts Nonce (r:1 w:1) - /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: System EventTopics (r:3 w:3) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// Storage: Contracts CodeStorage (r:0 w:1) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Contracts PristineCode (r:0 w:1) - /// Proof: Contracts PristineCode (max_values: None, max_size: Some(125988), added: 128463, mode: Measured) - /// The range of component `c` is `[0, 61717]`. - /// The range of component `i` is `[0, 1048576]`. - /// The range of component `s` is `[0, 1048576]`. 
- fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `270` - // Estimated: `8659` - // Minimum execution time: 3_159_921_000 picoseconds. - Weight::from_parts(594_826_134, 8659) - // Standard Error: 290 - .saturating_add(Weight::from_parts(106_471, 0).saturating_mul(c.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(1_160, 0).saturating_mul(i.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(1_417, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) - .saturating_add(RocksDbWeight::get().writes(10_u64)) - } - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Contracts Nonce (r:1 w:1) - /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:1 w:1) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `i` is `[0, 1048576]`. - /// The range of component `s` is `[0, 1048576]`. - fn instantiate(i: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `482` - // Estimated: `6408` - // Minimum execution time: 1_653_811_000 picoseconds. 
- Weight::from_parts(296_038_081, 6408) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_461, 0).saturating_mul(i.into())) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_430, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) - .saturating_add(RocksDbWeight::get().writes(7_u64)) - } - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - fn call() -> Weight { - // Proof Size summary in bytes: - // Measured: `759` - // Estimated: `6699` - // Minimum execution time: 195_916_000 picoseconds. 
- Weight::from_parts(196_706_000, 6699) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) - } - /// Storage: Contracts OwnerInfoOf (r:1 w:1) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:1 w:1) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// Storage: Contracts CodeStorage (r:0 w:1) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Contracts PristineCode (r:0 w:1) - /// Proof: Contracts PristineCode (max_values: None, max_size: Some(125988), added: 128463, mode: Measured) - /// The range of component `c` is `[0, 61717]`. fn upload_code(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 251_137_000 picoseconds. - Weight::from_parts(252_985_435, 3574) - // Standard Error: 88 - .saturating_add(Weight::from_parts(108_141, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 269_337_000 picoseconds. 
+ Weight::from_parts(220_186_006, 3607) + // Standard Error: 106 + .saturating_add(Weight::from_parts(74_291, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Contracts OwnerInfoOf (r:1 w:1) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:1 w:1) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// Storage: Contracts CodeStorage (r:0 w:1) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Contracts PristineCode (r:0 w:1) - /// Proof: Contracts PristineCode (max_values: None, max_size: Some(125988), added: 128463, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `System::EventTopics` (r:1 w:1) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:0 w:1) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: - // Measured: `255` - // Estimated: `3720` - // Minimum execution time: 33_521_000 picoseconds. - Weight::from_parts(34_039_000, 3720) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `259` + // Estimated: `3724` + // Minimum execution time: 35_127_000 picoseconds. 
+ Weight::from_parts(36_180_000, 3724) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:2 w:2) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:3 w:3) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `System::EventTopics` (r:3 w:3) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `570` - // Estimated: `8985` - // Minimum execution time: 33_477_000 picoseconds. - Weight::from_parts(33_890_000, 8985) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `576` + // Estimated: `8991` + // Minimum execution time: 37_550_000 picoseconds. 
+ Weight::from_parts(39_149_000, 8991) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range 
of component `r` is `[0, 1600]`. fn seal_caller(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `781 + r * (6 ±0)` - // Estimated: `6722 + r * (6 ±0)` - // Minimum execution time: 239_374_000 picoseconds. - Weight::from_parts(246_017_099, 6722) - // Standard Error: 539 - .saturating_add(Weight::from_parts(323_826, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `857 + r * (6 ±0)` + // Estimated: `6798 + r * (6 ±0)` + // Minimum execution time: 269_991_000 picoseconds. + Weight::from_parts(293_993_592, 6798) + // Standard Error: 665 + .saturating_add(Weight::from_parts(343_796, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1601 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) + /// Proof: 
`Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_is_contract(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `839 + r * (240 ±0)` - // Estimated: `6743 + r * (2715 ±0)` - // Minimum execution time: 240_656_000 picoseconds. - Weight::from_parts(87_361_934, 6743) - // Standard Error: 5_912 - .saturating_add(Weight::from_parts(3_329_840, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `924 + r * (232 ±0)` + // Estimated: `6831 + r * (2707 ±0)` + // Minimum execution time: 274_151_000 picoseconds. 
+ Weight::from_parts(83_529_206, 6831) + // Standard Error: 8_452 + .saturating_add(Weight::from_parts(3_534_024, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2715).saturating_mul(r.into())) - } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1601 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(Weight::from_parts(0, 2707).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: 
Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `831 + r * (244 ±0)` - // Estimated: `6747 + r * (2719 ±0)` - // Minimum execution time: 243_026_000 picoseconds. - Weight::from_parts(76_953_007, 6747) - // Standard Error: 6_640 - .saturating_add(Weight::from_parts(4_132_521, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `910 + r * (236 ±0)` + // Estimated: `6835 + r * (2711 ±0)` + // Minimum execution time: 276_689_000 picoseconds. + Weight::from_parts(110_268_281, 6835) + // Standard Error: 8_106 + .saturating_add(Weight::from_parts(4_376_136, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2719).saturating_mul(r.into())) - } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, 
max_size: None, mode: Measured) + .saturating_add(Weight::from_parts(0, 2711).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_own_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `788 + r * (6 ±0)` - // Estimated: `6730 + r * (6 ±0)` - // Minimum execution time: 242_736_000 picoseconds. - Weight::from_parts(243_136_007, 6730) - // Standard Error: 912 - .saturating_add(Weight::from_parts(414_717, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `864 + r * (6 ±0)` + // Estimated: `6806 + r * (6 ±0)` + // Minimum execution time: 274_079_000 picoseconds. 
+ Weight::from_parts(282_258_090, 6806) + // Standard Error: 1_343 + .saturating_add(Weight::from_parts(464_680, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, 
mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `778 + r * (3 ±0)` - // Estimated: `6723 + r * (3 ±0)` - // Minimum execution time: 240_130_000 picoseconds. - Weight::from_parts(244_517_187, 6723) - // Standard Error: 384 - .saturating_add(Weight::from_parts(167_431, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `854 + r * (3 ±0)` + // Estimated: `6799 + r * (3 ±0)` + // Minimum execution time: 270_960_000 picoseconds. + Weight::from_parts(281_985_584, 6799) + // Standard Error: 378 + .saturating_add(Weight::from_parts(184_462, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: 
`Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `668 + r * (3 ±0)` - // Estimated: `6608 + r * (3 ±0)` - // Minimum execution time: 228_022_000 picoseconds. - Weight::from_parts(232_385_198, 6608) - // Standard Error: 300 - .saturating_add(Weight::from_parts(145_143, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Measured: `744 + r * (3 ±0)` + // Estimated: `6684 + r * (3 ±0)` + // Minimum execution time: 244_835_000 picoseconds. 
+ Weight::from_parts(270_660_753, 6684) + // Standard Error: 390 + .saturating_add(Weight::from_parts(164_232, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, 
mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `782 + r * (6 ±0)` - // Estimated: `6724 + r * (6 ±0)` - // Minimum execution time: 240_250_000 picoseconds. - Weight::from_parts(240_268_824, 6724) - // Standard Error: 945 - .saturating_add(Weight::from_parts(329_577, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `858 + r * (6 ±0)` + // Estimated: `6800 + r * (6 ±0)` + // Minimum execution time: 273_269_000 picoseconds. + Weight::from_parts(274_468_168, 6800) + // Standard Error: 2_246 + .saturating_add(Weight::from_parts(386_838, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: 
`System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `778 + r * (6 ±0)` - // Estimated: `6719 + r * (6 ±0)` - // Minimum execution time: 242_370_000 picoseconds. - Weight::from_parts(242_389_500, 6719) - // Standard Error: 712 - .saturating_add(Weight::from_parts(518_380, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `854 + r * (6 ±0)` + // Estimated: `6795 + r * (6 ±0)` + // Minimum execution time: 275_244_000 picoseconds. 
+ Weight::from_parts(281_299_739, 6795) + // Standard Error: 2_890 + .saturating_add(Weight::from_parts(600_498, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:2 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, 
mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `922 + r * (6 ±0)` - // Estimated: `6846 + r * (6 ±0)` - // Minimum execution time: 238_563_000 picoseconds. - Weight::from_parts(253_511_314, 6846) - // Standard Error: 1_571 - .saturating_add(Weight::from_parts(1_454_089, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `998 + r * (6 ±0)` + // Estimated: `6922 + r * (6 ±0)` + // Minimum execution time: 271_540_000 picoseconds. + Weight::from_parts(298_456_935, 6922) + // Standard Error: 2_881 + .saturating_add(Weight::from_parts(1_719_337, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: 
`System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_value_transferred(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `792 + r * (6 ±0)` - // Estimated: `6741 + r * (6 ±0)` - // Minimum execution time: 242_995_000 picoseconds. - Weight::from_parts(240_061_456, 6741) - // Standard Error: 2_650 - .saturating_add(Weight::from_parts(326_813, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `868 + r * (6 ±0)` + // Estimated: `6817 + r * (6 ±0)` + // Minimum execution time: 274_832_000 picoseconds. 
+ Weight::from_parts(286_078_648, 6817) + // Standard Error: 695 + .saturating_add(Weight::from_parts(345_045, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, 
mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `790 + r * (6 ±0)` - // Estimated: `6739 + r * (6 ±0)` - // Minimum execution time: 241_342_000 picoseconds. - Weight::from_parts(240_875_314, 6739) - // Standard Error: 669 - .saturating_add(Weight::from_parts(324_519, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `866 + r * (6 ±0)` + // Estimated: `6815 + r * (6 ±0)` + // Minimum execution time: 267_337_000 picoseconds. + Weight::from_parts(283_693_170, 6815) + // Standard Error: 580 + .saturating_add(Weight::from_parts(345_350, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: 
`System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `787 + r * (6 ±0)` - // Estimated: `6737 + r * (6 ±0)` - // Minimum execution time: 238_954_000 picoseconds. - Weight::from_parts(242_269_896, 6737) - // Standard Error: 1_453 - .saturating_add(Weight::from_parts(317_998, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `863 + r * (6 ±0)` + // Estimated: `6813 + r * (6 ±0)` + // Minimum execution time: 276_313_000 picoseconds. 
+ Weight::from_parts(287_689_703, 6813) + // Standard Error: 1_251 + .saturating_add(Weight::from_parts(342_536, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, 
mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `778 + r * (6 ±0)` - // Estimated: `6723 + r * (6 ±0)` - // Minimum execution time: 240_935_000 picoseconds. - Weight::from_parts(242_938_271, 6723) - // Standard Error: 792 - .saturating_add(Weight::from_parts(316_782, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `854 + r * (6 ±0)` + // Estimated: `6799 + r * (6 ±0)` + // Minimum execution time: 274_196_000 picoseconds. + Weight::from_parts(288_641_687, 6799) + // Standard Error: 530 + .saturating_add(Weight::from_parts(336_194, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) - /// Proof: TransactionPayment NextFeeMultiplier (max_values: Some(1), max_size: Some(16), added: 511, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: 
`Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_weight_to_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `852 + r * (14 ±0)` - // Estimated: `6785 + r * (14 ±0)` - // Minimum execution time: 240_142_000 picoseconds. - Weight::from_parts(241_386_730, 6785) - // Standard Error: 2_116 - .saturating_add(Weight::from_parts(1_387_202, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `928 + r * (14 ±0)` + // Estimated: `6861 + r * (14 ±0)` + // Minimum execution time: 254_997_000 picoseconds. 
+ Weight::from_parts(292_260_891, 6861) + // Standard Error: 1_019 + .saturating_add(Weight::from_parts(1_447_021, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 1600]`. - fn seal_gas(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `745 + r * (4 ±0)` - // Estimated: `6687 + r * (4 ±0)` - // Minimum execution time: 165_617_000 picoseconds. 
- Weight::from_parts(170_794_127, 6687) - // Standard Error: 209 - .saturating_add(Weight::from_parts(127_931, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(r.into())) - } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 
503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_input(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `780 + r * (6 ±0)` - // Estimated: `6724 + r * (6 ±0)` - // Minimum execution time: 238_832_000 picoseconds. - Weight::from_parts(237_110_694, 6724) - // Standard Error: 539 - .saturating_add(Weight::from_parts(280_610, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `856 + r * (6 ±0)` + // Estimated: `6800 + r * (6 ±0)` + // Minimum execution time: 272_720_000 picoseconds. + Weight::from_parts(287_125_181, 6800) + // Standard Error: 491 + .saturating_add(Weight::from_parts(294_488, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: 
`System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_input_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `784` - // Estimated: `6724` - // Minimum execution time: 241_070_000 picoseconds. - Weight::from_parts(242_162_279, 6724) - // Standard Error: 1 - .saturating_add(Weight::from_parts(595, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `860` + // Estimated: `6800` + // Minimum execution time: 280_665_000 picoseconds. 
+ Weight::from_parts(233_022_448, 6800) + // Standard Error: 23 + .saturating_add(Weight::from_parts(996, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// 
Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `768 + r * (45 ±0)` - // Estimated: `6708 + r * (45 ±0)` - // Minimum execution time: 236_337_000 picoseconds. - Weight::from_parts(238_883_828, 6708) - // Standard Error: 188_978 - .saturating_add(Weight::from_parts(926_671, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `844 + r * (45 ±0)` + // Estimated: `6784 + r * (45 ±0)` + // Minimum execution time: 250_335_000 picoseconds. + Weight::from_parts(278_774_071, 6784) + // Standard Error: 873_509 + .saturating_add(Weight::from_parts(4_562_628, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 
2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_return_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `778` - // Estimated: `6731` - // Minimum execution time: 239_103_000 picoseconds. - Weight::from_parts(240_382_910, 6731) + // Measured: `854` + // Estimated: `6807` + // Minimum execution time: 278_402_000 picoseconds. 
+ Weight::from_parts(285_491_021, 6807) // Standard Error: 0 - .saturating_add(Weight::from_parts(181, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(Weight::from_parts(312, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: System Account (r:4 w:4) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts DeletionQueueCounter (r:1 w:1) - /// Proof: Contracts DeletionQueueCounter (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:1 w:1) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:3 w:3) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) - /// Storage: Contracts DeletionQueue (r:0 w:1) - /// Proof: Contracts DeletionQueue (max_values: None, max_size: Some(142), added: 2617, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:4 w:4) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: 
None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) + /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:3 w:3) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Contracts::DeletionQueue` (r:0 w:1) + /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `810 + r * (356 ±0)` - // Estimated: `6750 + r * (7781 ±0)` - // Minimum execution time: 238_739_000 picoseconds. - Weight::from_parts(241_041_330, 6750) - // Standard Error: 176_820 - .saturating_add(Weight::from_parts(115_332_869, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `2963 + r * (400 ±0)` + // Estimated: `8903 + r * (7825 ±0)` + // Minimum execution time: 281_030_000 picoseconds. 
+ Weight::from_parts(305_435_226, 8903) + // Standard Error: 816_824 + .saturating_add(Weight::from_parts(131_691_873, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(RocksDbWeight::get().writes((8_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 7781).saturating_mul(r.into())) - } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) - /// Proof: RandomnessCollectiveFlip RandomMaterial (max_values: Some(1), max_size: Some(2594), added: 3089, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(RocksDbWeight::get().writes((9_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 7825).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: 
None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) + /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_random(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `825 + r * (10 ±0)` - // Estimated: `6769 + r * (10 ±0)` - // Minimum execution time: 240_888_000 picoseconds. - Weight::from_parts(259_901_113, 6769) - // Standard Error: 5_935 - .saturating_add(Weight::from_parts(1_764_269, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `935 + r * (10 ±0)` + // Estimated: `6876 + r * (10 ±0)` + // Minimum execution time: 261_369_000 picoseconds. 
+ Weight::from_parts(300_458_315, 6876) + // Standard Error: 3_506 + .saturating_add(Weight::from_parts(1_971_733, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, 
mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_deposit_event(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `778 + r * (10 ±0)` - // Estimated: `6723 + r * (10 ±0)` - // Minimum execution time: 237_478_000 picoseconds. - Weight::from_parts(264_915_436, 6723) - // Standard Error: 4_644 - .saturating_add(Weight::from_parts(3_452_918, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `854 + r * (10 ±0)` + // Estimated: `6799 + r * (10 ±0)` + // Minimum execution time: 262_894_000 picoseconds. + Weight::from_parts(285_321_838, 6799) + // Standard Error: 6_585 + .saturating_add(Weight::from_parts(3_998_744, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:6 w:6) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// 
Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:6 w:6) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16384]`. fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `797 + t * (32 ±0)` - // Estimated: `6744 + t * (2508 ±0)` - // Minimum execution time: 255_720_000 picoseconds. - Weight::from_parts(247_945_758, 6744) - // Standard Error: 73_390 - .saturating_add(Weight::from_parts(2_483_239, 0).saturating_mul(t.into())) - // Standard Error: 20 - .saturating_add(Weight::from_parts(756, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `873 + t * (32 ±0)` + // Estimated: `6820 + t * (2508 ±0)` + // Minimum execution time: 275_909_000 picoseconds. 
+ Weight::from_parts(289_251_568, 6820) + // Standard Error: 94_431 + .saturating_add(Weight::from_parts(3_007_409, 0).saturating_mul(t.into())) + // Standard Error: 26 + .saturating_add(Weight::from_parts(815, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2508).saturating_mul(t.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` 
(r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_debug_message(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `777 + r * (7 ±0)` - // Estimated: `6721 + r * (7 ±0)` - // Minimum execution time: 172_214_000 picoseconds. - Weight::from_parts(177_306_567, 6721) - // Standard Error: 839 - .saturating_add(Weight::from_parts(230_558, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `853 + r * (7 ±0)` + // Estimated: `6797 + r * (7 ±0)` + // Minimum execution time: 168_482_000 picoseconds. + Weight::from_parts(178_065_606, 6797) + // Standard Error: 371 + .saturating_add(Weight::from_parts(242_851, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: MaxEncodedLen) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: MaxEncodedLen) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: 
None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `MaxEncodedLen`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `MaxEncodedLen`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `125728` - // Estimated: `131670` - // Minimum execution time: 354_105_000 picoseconds. - Weight::from_parts(360_649_854, 131670) - // Standard Error: 2 - .saturating_add(Weight::from_parts(737, 0).saturating_mul(i.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `125804` + // Estimated: `131746` + // Minimum execution time: 407_401_000 picoseconds. 
+ Weight::from_parts(426_585_443, 131746) + // Standard Error: 22 + .saturating_add(Weight::from_parts(986, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_set_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `845 + r * (292 ±0)` - // Estimated: `843 + r * (293 ±0)` - // Minimum execution time: 239_637_000 picoseconds. - Weight::from_parts(136_431_436, 843) - // Standard Error: 10_238 - .saturating_add(Weight::from_parts(6_070_221, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `921 + r * (292 ±0)` + // Estimated: `919 + r * (293 ±0)` + // Minimum execution time: 275_800_000 picoseconds. + Weight::from_parts(161_230_700, 919) + // Standard Error: 12_908 + .saturating_add(Weight::from_parts(6_965_844, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 293).saturating_mul(r.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. 
fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1304` - // Estimated: `1280` - // Minimum execution time: 256_198_000 picoseconds. - Weight::from_parts(289_972_802, 1280) - // Standard Error: 54 - .saturating_add(Weight::from_parts(438, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `1380` + // Estimated: `1356` + // Minimum execution time: 289_258_000 picoseconds. + Weight::from_parts(334_318_402, 1356) + // Standard Error: 59 + .saturating_add(Weight::from_parts(808, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1167 + n * (1 ±0)` - // Estimated: `1167 + n * (1 ±0)` - // Minimum execution time: 255_519_000 picoseconds. - Weight::from_parts(257_668_217, 1167) - // Standard Error: 19 - .saturating_add(Weight::from_parts(105, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `1243 + n * (1 ±0)` + // Estimated: `1243 + n * (1 ±0)` + // Minimum execution time: 277_874_000 picoseconds. 
+ Weight::from_parts(303_956_600, 1243) + // Standard Error: 33 + .saturating_add(Weight::from_parts(58, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_clear_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `841 + r * (288 ±0)` - // Estimated: `845 + r * (289 ±0)` - // Minimum execution time: 239_461_000 picoseconds. - Weight::from_parts(131_630_528, 845) - // Standard Error: 10_483 - .saturating_add(Weight::from_parts(5_910_066, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `917 + r * (288 ±0)` + // Estimated: `921 + r * (289 ±0)` + // Minimum execution time: 255_230_000 picoseconds. + Weight::from_parts(163_226_984, 921) + // Standard Error: 12_691 + .saturating_add(Weight::from_parts(6_808_905, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. 
fn seal_clear_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1163 + n * (1 ±0)` - // Estimated: `1163 + n * (1 ±0)` - // Minimum execution time: 254_904_000 picoseconds. - Weight::from_parts(261_213_399, 1163) - // Standard Error: 178 - .saturating_add(Weight::from_parts(125, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `1239 + n * (1 ±0)` + // Estimated: `1239 + n * (1 ±0)` + // Minimum execution time: 275_780_000 picoseconds. + Weight::from_parts(301_967_262, 1239) + // Standard Error: 34 + .saturating_add(Weight::from_parts(128, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_get_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `835 + r * (296 ±0)` - // Estimated: `840 + r * (297 ±0)` - // Minimum execution time: 239_995_000 picoseconds. - Weight::from_parts(151_326_508, 840) - // Standard Error: 8_960 - .saturating_add(Weight::from_parts(4_937_728, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `911 + r * (296 ±0)` + // Estimated: `916 + r * (297 ±0)` + // Minimum execution time: 279_295_000 picoseconds. 
+ Weight::from_parts(208_289_066, 916) + // Standard Error: 8_330 + .saturating_add(Weight::from_parts(5_600_713, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_get_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1179 + n * (1 ±0)` - // Estimated: `1179 + n * (1 ±0)` - // Minimum execution time: 254_515_000 picoseconds. - Weight::from_parts(256_728_817, 1179) - // Standard Error: 22 - .saturating_add(Weight::from_parts(706, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `1255 + n * (1 ±0)` + // Estimated: `1255 + n * (1 ±0)` + // Minimum execution time: 276_745_000 picoseconds. + Weight::from_parts(298_824_233, 1255) + // Standard Error: 36 + .saturating_add(Weight::from_parts(717, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. 
fn seal_contains_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `856 + r * (288 ±0)` - // Estimated: `857 + r * (289 ±0)` - // Minimum execution time: 240_601_000 picoseconds. - Weight::from_parts(154_476_561, 857) - // Standard Error: 8_872 - .saturating_add(Weight::from_parts(4_805_043, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `932 + r * (288 ±0)` + // Estimated: `933 + r * (289 ±0)` + // Minimum execution time: 275_137_000 picoseconds. + Weight::from_parts(196_695_898, 933) + // Standard Error: 9_207 + .saturating_add(Weight::from_parts(5_466_071, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_contains_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1166 + n * (1 ±0)` - // Estimated: `1166 + n * (1 ±0)` - // Minimum execution time: 253_654_000 picoseconds. - Weight::from_parts(257_288_586, 1166) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `1242 + n * (1 ±0)` + // Estimated: `1242 + n * (1 ±0)` + // Minimum execution time: 269_315_000 picoseconds. 
+ Weight::from_parts(296_795_271, 1242) + // Standard Error: 39 + .saturating_add(Weight::from_parts(242, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_take_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `829 + r * (296 ±0)` - // Estimated: `836 + r * (297 ±0)` - // Minimum execution time: 239_869_000 picoseconds. - Weight::from_parts(135_258_204, 836) - // Standard Error: 10_378 - .saturating_add(Weight::from_parts(6_144_770, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `905 + r * (296 ±0)` + // Estimated: `912 + r * (297 ±0)` + // Minimum execution time: 256_406_000 picoseconds. + Weight::from_parts(156_850_288, 912) + // Standard Error: 12_496 + .saturating_add(Weight::from_parts(7_055_305, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. 
fn seal_take_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1180 + n * (1 ±0)` - // Estimated: `1180 + n * (1 ±0)` - // Minimum execution time: 258_153_000 picoseconds. - Weight::from_parts(260_068_186, 1180) - // Standard Error: 25 - .saturating_add(Weight::from_parts(744, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `1256 + n * (1 ±0)` + // Estimated: `1256 + n * (1 ±0)` + // Minimum execution time: 280_297_000 picoseconds. + Weight::from_parts(302_241_752, 1256) + // Standard Error: 34 + .saturating_add(Weight::from_parts(748, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: System Account (r:1602 w:1601) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1602 w:1601) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` 
(`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_transfer(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1373 + r * (45 ±0)` - // Estimated: `7270 + r * (2520 ±0)` - // Minimum execution time: 243_189_000 picoseconds. - Weight::from_parts(243_465_000, 7270) - // Standard Error: 30_961 - .saturating_add(Weight::from_parts(35_376_623, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `1449 + r * (45 ±0)` + // Estimated: `7346 + r * (2520 ±0)` + // Minimum execution time: 274_834_000 picoseconds. 
+ Weight::from_parts(176_977_557, 7346) + // Standard Error: 32_386 + .saturating_add(Weight::from_parts(39_393_162, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:801 w:801) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:2 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:803 w:803) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:2 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: 
Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:803 w:803) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1140 + r * (276 ±0)` - // Estimated: `9332 + r * (2752 ±0)` - // Minimum execution time: 243_656_000 picoseconds. - Weight::from_parts(244_221_000, 9332) - // Standard Error: 69_762 - .saturating_add(Weight::from_parts(216_905_619, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `1304 + r * (268 ±0)` + // Estimated: `9485 + r * (2744 ±0)` + // Minimum execution time: 279_802_000 picoseconds. + Weight::from_parts(287_995_000, 9485) + // Standard Error: 99_110 + .saturating_add(Weight::from_parts(245_521_843, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2752).saturating_mul(r.into())) - } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:736 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: 
System EventTopics (r:737 w:737) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(Weight::from_parts(0, 2744).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:736 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:736 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:737 w:737) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + r * (502 ±0)` - // Estimated: `6727 + r * (2572 ±10)` - // Minimum execution time: 242_632_000 picoseconds. - Weight::from_parts(243_068_000, 6727) - // Standard Error: 126_218 - .saturating_add(Weight::from_parts(213_096_291, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into()))) + // Measured: `0 + r * (576 ±0)` + // Estimated: `6803 + r * (2637 ±3)` + // Minimum execution time: 273_435_000 picoseconds. 
+ Weight::from_parts(276_865_000, 6803) + // Standard Error: 148_051 + .saturating_add(Weight::from_parts(244_660_274, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2572).saturating_mul(r.into())) - } - /// Storage: System Account (r:3 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:2 w:2) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:2 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:4 w:4) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(Weight::from_parts(0, 2637).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:3 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:2 w:0) + 
/// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:4 w:4) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1048576]`. fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1154 + t * (204 ±0)` - // Estimated: `12044 + t * (5154 ±0)` - // Minimum execution time: 421_691_000 picoseconds. - Weight::from_parts(394_587_369, 12044) - // Standard Error: 1_104_014 - .saturating_add(Weight::from_parts(30_461_758, 0).saturating_mul(t.into())) - // Standard Error: 1 - .saturating_add(Weight::from_parts(601, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(10_u64)) + // Measured: `1322 + t * (310 ±0)` + // Estimated: `12212 + t * (5260 ±0)` + // Minimum execution time: 477_593_000 picoseconds. 
+ Weight::from_parts(69_887_451, 12212) + // Standard Error: 11_764_606 + .saturating_add(Weight::from_parts(373_361_977, 0).saturating_mul(t.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_000, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 5154).saturating_mul(t.into())) - } - /// Storage: System Account (r:1602 w:1602) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:801 w:801) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:801 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts Nonce (r:1 w:1) - /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:800 w:800) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:802 w:802) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(Weight::from_parts(0, 5260).saturating_mul(t.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1602 w:1602) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), 
added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:801 w:800) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:801 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:802 w:802) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1322 + r * (254 ±0)` - // Estimated: `7146 + r * (5205 ±0)` - // Minimum execution time: 581_252_000 picoseconds. - Weight::from_parts(582_275_000, 7146) - // Standard Error: 279_771 - .saturating_add(Weight::from_parts(349_770_967, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `1380 + r * (255 ±0)` + // Estimated: `7204 + r * (5206 ±0)` + // Minimum execution time: 652_387_000 picoseconds. 
+ Weight::from_parts(658_670_000, 7204) + // Standard Error: 363_054 + .saturating_add(Weight::from_parts(395_547_049, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 5205).saturating_mul(r.into())) - } - /// Storage: System Account (r:4 w:4) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:2 w:2) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:2 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts Nonce (r:1 w:1) - /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:1 w:1) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:3 w:3) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:4 w:4) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) + /// Proof: `Contracts::ContractInfoOf` 
(`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:2 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:2 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:3 w:3) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1071 + t * (187 ±0)` - // Estimated: `9492 + t * (2634 ±2)` - // Minimum execution time: 1_623_241_000 picoseconds. - Weight::from_parts(317_076_173, 9492) - // Standard Error: 4_549_416 - .saturating_add(Weight::from_parts(125_360_446, 0).saturating_mul(t.into())) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_183, 0).saturating_mul(i.into())) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_352, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(13_u64)) + // Measured: `1233 + t * (156 ±0)` + // Estimated: `9663 + t * (2578 ±2)` + // Minimum execution time: 2_299_620_000 picoseconds. 
+ Weight::from_parts(1_274_859_063, 9663) + // Standard Error: 12_129_871 + .saturating_add(Weight::from_parts(16_608_792, 0).saturating_mul(t.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_014, 0).saturating_mul(i.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_180, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(10_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2634).saturating_mul(t.into())) - } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + .saturating_add(Weight::from_parts(0, 2578).saturating_mul(t.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: 
`Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `777 + r * (8 ±0)` - // Estimated: `6718 + r * (8 ±0)` - // Minimum execution time: 238_262_000 picoseconds. - Weight::from_parts(243_093_288, 6718) - // Standard Error: 870 - .saturating_add(Weight::from_parts(573_939, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `853 + r * (8 ±0)` + // Estimated: `6794 + r * (8 ±0)` + // Minimum execution time: 267_959_000 picoseconds. 
+ Weight::from_parts(282_967_946, 6794) + // Standard Error: 624 + .saturating_add(Weight::from_parts(402_344, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, 
mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `785` - // Estimated: `6725` - // Minimum execution time: 239_888_000 picoseconds. - Weight::from_parts(242_849_333, 6725) - // Standard Error: 3 - .saturating_add(Weight::from_parts(3_949, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `861` + // Estimated: `6801` + // Minimum execution time: 274_585_000 picoseconds. + Weight::from_parts(272_480_647, 6801) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_089, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: 
`Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `779 + r * (8 ±0)` - // Estimated: `6721 + r * (8 ±0)` - // Minimum execution time: 237_288_000 picoseconds. - Weight::from_parts(242_510_631, 6721) - // Standard Error: 977 - .saturating_add(Weight::from_parts(742_726, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `855 + r * (8 ±0)` + // Estimated: `6797 + r * (8 ±0)` + // Minimum execution time: 268_346_000 picoseconds. 
+ Weight::from_parts(284_168_231, 6797) + // Standard Error: 620 + .saturating_add(Weight::from_parts(805_038, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, 
mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `787` - // Estimated: `6729` - // Minimum execution time: 240_006_000 picoseconds. - Weight::from_parts(233_802_510, 6729) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_161, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `863` + // Estimated: `6805` + // Minimum execution time: 273_073_000 picoseconds. + Weight::from_parts(280_346_065, 6805) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_357, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: 
`Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `779 + r * (8 ±0)` - // Estimated: `6724 + r * (8 ±0)` - // Minimum execution time: 237_532_000 picoseconds. - Weight::from_parts(243_087_565, 6724) - // Standard Error: 656 - .saturating_add(Weight::from_parts(417_850, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `855 + r * (8 ±0)` + // Estimated: `6800 + r * (8 ±0)` + // Minimum execution time: 263_072_000 picoseconds. 
+ Weight::from_parts(284_487_433, 6800) + // Standard Error: 668 + .saturating_add(Weight::from_parts(458_763, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, 
mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `787` - // Estimated: `6733` - // Minimum execution time: 241_429_000 picoseconds. - Weight::from_parts(233_528_258, 6733) + // Measured: `863` + // Estimated: `6809` + // Minimum execution time: 271_488_000 picoseconds. + Weight::from_parts(273_877_727, 6809) // Standard Error: 1 - .saturating_add(Weight::from_parts(913, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) 
+ /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `779 + r * (8 ±0)` - // Estimated: `6725 + r * (8 ±0)` - // Minimum execution time: 237_622_000 picoseconds. - Weight::from_parts(240_476_401, 6725) - // Standard Error: 795 - .saturating_add(Weight::from_parts(416_869, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `855 + r * (8 ±0)` + // Estimated: `6801 + r * (8 ±0)` + // Minimum execution time: 271_365_000 picoseconds. 
+ Weight::from_parts(285_100_883, 6801) + // Standard Error: 651 + .saturating_add(Weight::from_parts(462_754, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, 
mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `787` - // Estimated: `6727` - // Minimum execution time: 241_134_000 picoseconds. - Weight::from_parts(234_043_271, 6727) - // Standard Error: 3 - .saturating_add(Weight::from_parts(919, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `863` + // Estimated: `6803` + // Minimum execution time: 272_341_000 picoseconds. + Weight::from_parts(275_388_470, 6803) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_192, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: 
`Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `912 + n * (1 ±0)` - // Estimated: `6849 + n * (1 ±0)` - // Minimum execution time: 292_699_000 picoseconds. - Weight::from_parts(301_523_608, 6849) - // Standard Error: 14 - .saturating_add(Weight::from_parts(4_676, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `988 + n * (1 ±0)` + // Estimated: `6925 + n * (1 ±0)` + // Minimum execution time: 341_302_000 picoseconds. 
+ Weight::from_parts(354_111_630, 6925) + // Standard Error: 8 + .saturating_add(Weight::from_parts(5_913, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: 
`Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `727 + r * (112 ±0)` - // Estimated: `6666 + r * (112 ±0)` - // Minimum execution time: 241_126_000 picoseconds. - Weight::from_parts(248_796_458, 6666) - // Standard Error: 21_501 - .saturating_add(Weight::from_parts(48_091_265, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `804 + r * (112 ±0)` + // Estimated: `6742 + r * (112 ±0)` + // Minimum execution time: 275_325_000 picoseconds. + Weight::from_parts(333_041_903, 6742) + // Standard Error: 11_171 + .saturating_add(Weight::from_parts(56_605_218, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + 
/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `822 + r * (76 ±0)` - // Estimated: `6717 + r * (77 ±0)` - // Minimum execution time: 242_379_000 picoseconds. - Weight::from_parts(261_355_525, 6717) - // Standard Error: 18_862 - .saturating_add(Weight::from_parts(37_603_073, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `898 + r * (76 ±0)` + // Estimated: `6793 + r * (77 ±0)` + // Minimum execution time: 274_165_000 picoseconds. 
+ Weight::from_parts(347_487_800, 6793) + // Standard Error: 15_398 + .saturating_add(Weight::from_parts(46_072_020, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 
503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `792 + r * (42 ±0)` - // Estimated: `6731 + r * (42 ±0)` - // Minimum execution time: 241_270_000 picoseconds. - Weight::from_parts(245_135_291, 6731) - // Standard Error: 10_757 - .saturating_add(Weight::from_parts(9_344_876, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `868 + r * (42 ±0)` + // Estimated: `6807 + r * (42 ±0)` + // Minimum execution time: 270_855_000 picoseconds. + Weight::from_parts(320_777_105, 6807) + // Standard Error: 11_106 + .saturating_add(Weight::from_parts(12_053_053, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1536 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts OwnerInfoOf (r:1536 w:1536) - /// Proof: Contracts OwnerInfoOf (max_values: None, max_size: Some(88), added: 2563, mode: Measured) - /// Storage: System EventTopics (r:1538 w:1538) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 
w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1536 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:1538 w:1538) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + r * (964 ±0)` - // Estimated: `8190 + r * (3090 ±7)` - // Minimum execution time: 240_506_000 picoseconds. - Weight::from_parts(241_653_000, 8190) - // Standard Error: 46_785 - .saturating_add(Weight::from_parts(22_107_816, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `0 + r * (965 ±0)` + // Estimated: `6798 + r * (3090 ±10)` + // Minimum execution time: 257_732_000 picoseconds. 
+ Weight::from_parts(280_982_000, 6798) + // Standard Error: 68_194 + .saturating_add(Weight::from_parts(27_413_991, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 3090).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: 
Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `r` is `[0, 32]`. + fn add_delegate_dependency(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `918 + r * (132 ±0)` + // Estimated: `6870 + r * (2606 ±0)` + // Minimum execution time: 278_285_000 picoseconds. + Weight::from_parts(298_012_554, 6870) + // Standard Error: 24_160 + .saturating_add(Weight::from_parts(6_363_118, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2606).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `MaxEncodedLen`) + /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `MaxEncodedLen`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: 
`Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `r` is `[0, 32]`. + fn remove_delegate_dependency(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `965 + r * (183 ±0)` + // Estimated: `129453 + r * (2568 ±0)` + // Minimum execution time: 258_198_000 picoseconds. + Weight::from_parts(290_090_206, 129453) + // Standard Error: 19_792 + .saturating_add(Weight::from_parts(6_004_811, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2568).saturating_mul(r.into())) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: 
`System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `773 + r * (3 ±0)` - // Estimated: `6723 + r * (3 ±0)` - // Minimum execution time: 241_539_000 picoseconds. - Weight::from_parts(245_471_045, 6723) - // Standard Error: 416 - .saturating_add(Weight::from_parts(159_577, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `849 + r * (3 ±0)` + // Estimated: `6799 + r * (3 ±0)` + // Minimum execution time: 263_315_000 picoseconds. + Weight::from_parts(284_093_748, 6799) + // Standard Error: 371 + .saturating_add(Weight::from_parts(176_949, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: 
None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_account_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1975 + r * (39 ±0)` - // Estimated: `7805 + r * (40 ±0)` - // Minimum execution time: 242_702_000 picoseconds. - Weight::from_parts(274_518_595, 7805) - // Standard Error: 1_138 - .saturating_add(Weight::from_parts(256_973, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `2082 + r * (39 ±0)` + // Estimated: `7886 + r * (40 ±0)` + // Minimum execution time: 274_583_000 picoseconds. 
+ Weight::from_parts(352_081_486, 7886) + // Standard Error: 1_799 + .saturating_add(Weight::from_parts(313_433, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) } - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: Measured) - /// Storage: Contracts ContractInfoOf (r:1 w:1) - /// Proof: Contracts ContractInfoOf (max_values: None, max_size: Some(290), added: 2765, mode: Measured) - /// Storage: Contracts CodeStorage (r:1 w:0) - /// Proof: Contracts CodeStorage (max_values: None, max_size: Some(126001), added: 128476, mode: Measured) - /// Storage: Timestamp Now (r:1 w:0) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: Contracts Nonce (r:1 w:1) - /// Proof: Contracts Nonce (max_values: Some(1), max_size: Some(8), added: 503, mode: Measured) - /// Storage: System EventTopics (r:2 w:2) - /// Proof Skipped: System EventTopics (max_values: None, max_size: None, mode: Measured) + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1819), added: 4294, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:1 w:0) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: 
`Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_instantiation_nonce(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `776 + r * (3 ±0)` - // Estimated: `6723 + r * (3 ±0)` - // Minimum execution time: 239_360_000 picoseconds. - Weight::from_parts(245_990_810, 6723) - // Standard Error: 3_188 - .saturating_add(Weight::from_parts(143_408, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `852 + r * (3 ±0)` + // Estimated: `6799 + r * (3 ±0)` + // Minimum execution time: 267_291_000 picoseconds. + Weight::from_parts(287_500_540, 6799) + // Standard Error: 393 + .saturating_add(Weight::from_parts(152_587, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -3687,509 +3865,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_617_000 picoseconds. - Weight::from_parts(1_900_268, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(2_950, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64load(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_739_000 picoseconds. 
- Weight::from_parts(2_109_373, 0) - // Standard Error: 43 - .saturating_add(Weight::from_parts(6_586, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64store(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_726_000 picoseconds. - Weight::from_parts(2_268_507, 0) - // Standard Error: 4 - .saturating_add(Weight::from_parts(6_022, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_select(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_628_000 picoseconds. - Weight::from_parts(2_042_521, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(7_935, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_if(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_648_000 picoseconds. - Weight::from_parts(1_902_691, 0) - // Standard Error: 7 - .saturating_add(Weight::from_parts(10_572, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_br(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_626_000 picoseconds. - Weight::from_parts(1_891_843, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(4_612, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_br_if(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_581_000 picoseconds. - Weight::from_parts(1_139_823, 0) - // Standard Error: 74 - .saturating_add(Weight::from_parts(8_008, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. 
- fn instr_br_table(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_591_000 picoseconds. - Weight::from_parts(1_258_400, 0) - // Standard Error: 34 - .saturating_add(Weight::from_parts(9_706, 0).saturating_mul(r.into())) - } - /// The range of component `e` is `[1, 256]`. - fn instr_br_table_per_entry(e: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_701_000 picoseconds. - Weight::from_parts(1_876_118, 0) - // Standard Error: 23 - .saturating_add(Weight::from_parts(4, 0).saturating_mul(e.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_call(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_617_000 picoseconds. - Weight::from_parts(1_565_613, 0) - // Standard Error: 629 - .saturating_add(Weight::from_parts(19_575, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_call_indirect(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_875_000 picoseconds. - Weight::from_parts(4_549_584, 0) - // Standard Error: 278 - .saturating_add(Weight::from_parts(24_336, 0).saturating_mul(r.into())) - } - /// The range of component `l` is `[0, 1024]`. - fn instr_call_per_local(l: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_742_000 picoseconds. - Weight::from_parts(2_087_387, 0) - // Standard Error: 26 - .saturating_add(Weight::from_parts(1_041, 0).saturating_mul(l.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_local_get(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_861_000 picoseconds. 
- Weight::from_parts(3_552_428, 0) - // Standard Error: 23 - .saturating_add(Weight::from_parts(2_339, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_local_set(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_866_000 picoseconds. - Weight::from_parts(3_151_948, 0) - // Standard Error: 4 - .saturating_add(Weight::from_parts(3_667, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_local_tee(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_919_000 picoseconds. - Weight::from_parts(3_214_587, 0) - // Standard Error: 5 - .saturating_add(Weight::from_parts(3_867, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_global_get(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_764_000 picoseconds. - Weight::from_parts(1_815_683, 0) - // Standard Error: 123 - .saturating_add(Weight::from_parts(8_733, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_global_set(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_783_000 picoseconds. - Weight::from_parts(2_437_152, 0) + // Minimum execution time: 1_440_000 picoseconds. + Weight::from_parts(1_656_631, 0) // Standard Error: 13 - .saturating_add(Weight::from_parts(8_839, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_memory_current(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_745_000 picoseconds. 
- Weight::from_parts(2_018_078, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_756, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 16]`. - fn instr_memory_grow(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_648_000 picoseconds. - Weight::from_parts(648_059, 0) - // Standard Error: 142_299 - .saturating_add(Weight::from_parts(13_313_060, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64clz(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_652_000 picoseconds. - Weight::from_parts(1_953_179, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_828, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64ctz(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_607_000 picoseconds. - Weight::from_parts(1_924_759, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_762, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64popcnt(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_687_000 picoseconds. - Weight::from_parts(1_959_683, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_754, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64eqz(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_641_000 picoseconds. - Weight::from_parts(1_975_838, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_681, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. 
- fn instr_i64extendsi32(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_689_000 picoseconds. - Weight::from_parts(1_980_109, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_880, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64extendui32(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_671_000 picoseconds. - Weight::from_parts(1_912_089, 0) - // Standard Error: 29 - .saturating_add(Weight::from_parts(3_896, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i32wrapi64(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_643_000 picoseconds. - Weight::from_parts(1_951_485, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_725, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64eq(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_649_000 picoseconds. - Weight::from_parts(1_937_598, 0) - // Standard Error: 4 - .saturating_add(Weight::from_parts(6_045, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64ne(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_651_000 picoseconds. - Weight::from_parts(2_202_977, 0) - // Standard Error: 313 - .saturating_add(Weight::from_parts(6_299, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64lts(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_589_000 picoseconds. 
- Weight::from_parts(1_946_304, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(6_019, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64ltu(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_614_000 picoseconds. - Weight::from_parts(1_933_375, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(6_020, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64gts(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_678_000 picoseconds. - Weight::from_parts(2_003_850, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(5_816, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64gtu(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_651_000 picoseconds. - Weight::from_parts(1_971_321, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(6_114, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64les(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_647_000 picoseconds. - Weight::from_parts(2_017_232, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(5_990, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64leu(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_635_000 picoseconds. - Weight::from_parts(3_232_848, 0) - // Standard Error: 105 - .saturating_add(Weight::from_parts(5_816, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. 
- fn instr_i64ges(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_623_000 picoseconds. - Weight::from_parts(1_996_165, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(5_964, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64geu(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_668_000 picoseconds. - Weight::from_parts(1_973_238, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(6_021, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64add(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_674_000 picoseconds. - Weight::from_parts(1_981_762, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(5_898, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64sub(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_632_000 picoseconds. - Weight::from_parts(1_935_700, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(6_154, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64mul(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_607_000 picoseconds. - Weight::from_parts(1_942_734, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(5_797, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64divs(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_611_000 picoseconds. 
- Weight::from_parts(2_960_454, 0) - // Standard Error: 177 - .saturating_add(Weight::from_parts(11_666, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64divu(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_641_000 picoseconds. - Weight::from_parts(2_104_200, 0) - // Standard Error: 5 - .saturating_add(Weight::from_parts(10_540, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64rems(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_643_000 picoseconds. - Weight::from_parts(2_602_908, 0) - // Standard Error: 24 - .saturating_add(Weight::from_parts(11_900, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64remu(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_584_000 picoseconds. - Weight::from_parts(2_056_817, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(10_722, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64and(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_652_000 picoseconds. - Weight::from_parts(1_988_892, 0) - // Standard Error: 4 - .saturating_add(Weight::from_parts(5_683, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64or(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_660_000 picoseconds. - Weight::from_parts(2_148_537, 0) - // Standard Error: 38 - .saturating_add(Weight::from_parts(5_756, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. 
- fn instr_i64xor(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_629_000 picoseconds. - Weight::from_parts(1_955_010, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(5_931, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64shl(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_569_000 picoseconds. - Weight::from_parts(1_982_403, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(5_867, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64shrs(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_615_000 picoseconds. - Weight::from_parts(1_989_920, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(6_137, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64shru(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_646_000 picoseconds. - Weight::from_parts(2_020_935, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(5_863, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64rotl(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_661_000 picoseconds. - Weight::from_parts(2_320_710, 0) - // Standard Error: 27 - .saturating_add(Weight::from_parts(5_922, 0).saturating_mul(r.into())) - } - /// The range of component `r` is `[0, 5000]`. - fn instr_i64rotr(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_674_000 picoseconds. 
- Weight::from_parts(2_044_188, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(5_855, 0).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(10_486, 0).saturating_mul(r.into())) } } diff --git a/frame/conviction-voting/Cargo.toml b/frame/conviction-voting/Cargo.toml index f9390d6b6efe1..cdc50340f7731 100644 --- a/frame/conviction-voting/Cargo.toml +++ b/frame/conviction-voting/Cargo.toml @@ -14,23 +14,23 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] assert_matches = "1.3.0" -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len", ] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", features = ["derive"], optional = true } +serde = { version = "1.0.163", features = ["derive"], optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-scheduler = { version = "4.0.0-dev", path = "../scheduler" } -sp-core = { version = 
"7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } [features] default = ["std"] @@ -44,6 +44,9 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "pallet-scheduler/std", + "sp-core/std" ] runtime-benchmarks = [ "frame-support/runtime-benchmarks", @@ -51,5 +54,13 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-scheduler/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-scheduler/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs index 3ad81486ed26d..9c2993fc5cae1 100644 --- a/frame/conviction-voting/src/lib.rs +++ b/frame/conviction-voting/src/lib.rs @@ -35,6 +35,7 @@ use frame_support::{ ReservableCurrency, WithdrawReasons, }, }; +use frame_system::pallet_prelude::BlockNumberFor; use sp_runtime::{ traits::{AtLeast32BitUnsigned, Saturating, StaticLookup, Zero}, ArithmeticError, Perbill, @@ -68,16 +69,13 @@ type BalanceOf = type VotingOf = Voting< BalanceOf, ::AccountId, - ::BlockNumber, + BlockNumberFor, PollIndexOf, >::MaxVotes, >; #[allow(dead_code)] -type DelegatingOf = Delegating< - BalanceOf, - ::AccountId, - ::BlockNumber, ->; +type DelegatingOf = + Delegating, ::AccountId, BlockNumberFor>; pub type TallyOf = Tally, >::MaxTurnout>; pub type VotesOf = BalanceOf; type PollIndexOf = <>::Polls as Polling>>::Index; @@ -103,14 +101,14 @@ pub mod pallet { type WeightInfo: WeightInfo; /// Currency type with which voting happens. type Currency: ReservableCurrency - + LockableCurrency + + LockableCurrency> + fungible::Inspect; /// The implementation of the logic which conducts polls. 
type Polls: Polling< TallyOf, Votes = BalanceOf, - Moment = Self::BlockNumber, + Moment = BlockNumberFor, >; /// The maximum amount of tokens which may be used for voting. May just be @@ -130,7 +128,7 @@ pub mod pallet { /// It should be no shorter than enactment period to ensure that in the case of an approval, /// those successful voters are locked into the consequences that their votes entail. #[pallet::constant] - type VoteLockingPeriod: Get; + type VoteLockingPeriod: Get>; } /// All voting for a particular voter in a particular voting class. We store the balance for the diff --git a/frame/conviction-voting/src/tests.rs b/frame/conviction-voting/src/tests.rs index f33e511a164f6..656112deebfbb 100644 --- a/frame/conviction-voting/src/tests.rs +++ b/frame/conviction-voting/src/tests.rs @@ -25,23 +25,19 @@ use frame_support::{ }; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; use super::*; use crate as pallet_conviction_voting; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Voting: pallet_conviction_voting::{Pallet, Call, Storage, Event}, } @@ -61,14 +57,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; 
type Version = (); @@ -94,7 +89,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -196,7 +191,7 @@ impl Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], } diff --git a/frame/conviction-voting/src/weights.rs b/frame/conviction-voting/src/weights.rs index bc53855b6b18e..225f5c2cadd6f 100644 --- a/frame/conviction-voting/src/weights.rs +++ b/frame/conviction-voting/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_conviction_voting //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_conviction_voting +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_conviction_voting. pub trait WeightInfo { @@ -76,8 +80,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `13074` // Estimated: `219984` - // Minimum execution time: 99_130_000 picoseconds. - Weight::from_parts(100_355_000, 219984) + // Minimum execution time: 112_936_000 picoseconds. + Weight::from_parts(116_972_000, 219984) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -97,8 +101,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `20216` // Estimated: `219984` - // Minimum execution time: 276_420_000 picoseconds. - Weight::from_parts(277_433_000, 219984) + // Minimum execution time: 291_971_000 picoseconds. + Weight::from_parts(301_738_000, 219984) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -112,8 +116,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `19968` // Estimated: `219984` - // Minimum execution time: 241_058_000 picoseconds. - Weight::from_parts(242_235_000, 219984) + // Minimum execution time: 262_582_000 picoseconds. 
+ Weight::from_parts(270_955_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -125,8 +129,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `12675` // Estimated: `30706` - // Minimum execution time: 46_385_000 picoseconds. - Weight::from_parts(46_709_000, 30706) + // Minimum execution time: 52_909_000 picoseconds. + Weight::from_parts(56_365_000, 30706) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -147,10 +151,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `240 + r * (1627 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 48_947_000 picoseconds. - Weight::from_parts(50_219_593, 109992) - // Standard Error: 70_238 - .saturating_add(Weight::from_parts(40_509_706, 0).saturating_mul(r.into())) + // Minimum execution time: 54_640_000 picoseconds. + Weight::from_parts(57_185_281, 109992) + // Standard Error: 193_362 + .saturating_add(Weight::from_parts(44_897_418, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -168,10 +172,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `406 + r * (1376 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 23_408_000 picoseconds. - Weight::from_parts(24_087_793, 109992) - // Standard Error: 31_776 - .saturating_add(Weight::from_parts(36_594_606, 0).saturating_mul(r.into())) + // Minimum execution time: 26_514_000 picoseconds. 
+ Weight::from_parts(28_083_732, 109992) + // Standard Error: 104_905 + .saturating_add(Weight::from_parts(40_722_467, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -190,8 +194,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `11734` // Estimated: `30706` - // Minimum execution time: 65_903_000 picoseconds. - Weight::from_parts(66_460_000, 30706) + // Minimum execution time: 71_140_000 picoseconds. + Weight::from_parts(77_388_000, 30706) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -215,8 +219,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `13074` // Estimated: `219984` - // Minimum execution time: 99_130_000 picoseconds. - Weight::from_parts(100_355_000, 219984) + // Minimum execution time: 112_936_000 picoseconds. + Weight::from_parts(116_972_000, 219984) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -236,8 +240,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `20216` // Estimated: `219984` - // Minimum execution time: 276_420_000 picoseconds. - Weight::from_parts(277_433_000, 219984) + // Minimum execution time: 291_971_000 picoseconds. + Weight::from_parts(301_738_000, 219984) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -251,8 +255,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `19968` // Estimated: `219984` - // Minimum execution time: 241_058_000 picoseconds. - Weight::from_parts(242_235_000, 219984) + // Minimum execution time: 262_582_000 picoseconds. 
+ Weight::from_parts(270_955_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -264,8 +268,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `12675` // Estimated: `30706` - // Minimum execution time: 46_385_000 picoseconds. - Weight::from_parts(46_709_000, 30706) + // Minimum execution time: 52_909_000 picoseconds. + Weight::from_parts(56_365_000, 30706) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -286,10 +290,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `240 + r * (1627 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 48_947_000 picoseconds. - Weight::from_parts(50_219_593, 109992) - // Standard Error: 70_238 - .saturating_add(Weight::from_parts(40_509_706, 0).saturating_mul(r.into())) + // Minimum execution time: 54_640_000 picoseconds. + Weight::from_parts(57_185_281, 109992) + // Standard Error: 193_362 + .saturating_add(Weight::from_parts(44_897_418, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -307,10 +311,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `406 + r * (1376 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 23_408_000 picoseconds. - Weight::from_parts(24_087_793, 109992) - // Standard Error: 31_776 - .saturating_add(Weight::from_parts(36_594_606, 0).saturating_mul(r.into())) + // Minimum execution time: 26_514_000 picoseconds. 
+ Weight::from_parts(28_083_732, 109992) + // Standard Error: 104_905 + .saturating_add(Weight::from_parts(40_722_467, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -329,8 +333,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `11734` // Estimated: `30706` - // Minimum execution time: 65_903_000 picoseconds. - Weight::from_parts(66_460_000, 30706) + // Minimum execution time: 71_140_000 picoseconds. + Weight::from_parts(77_388_000, 30706) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/frame/core-fellowship/Cargo.toml b/frame/core-fellowship/Cargo.toml index 16b673fb41b98..0d2cb7904efd9 100644 --- a/frame/core-fellowship/Cargo.toml +++ b/frame/core-fellowship/Cargo.toml @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.16", default-features = false } scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../primitives/arithmetic" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = 
"7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [features] default = ["std"] @@ -46,4 +46,8 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/core-fellowship/src/benchmarking.rs b/frame/core-fellowship/src/benchmarking.rs index 551ec30c19f01..ea0b5c6d4495f 100644 --- a/frame/core-fellowship/src/benchmarking.rs +++ b/frame/core-fellowship/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. +// Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,7 +23,7 @@ use super::*; use crate::Pallet as CoreFellowship; use frame_benchmarking::v2::*; -use frame_system::RawOrigin; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use sp_arithmetic::traits::Bounded; const SEED: u32 = 0; @@ -75,7 +75,7 @@ mod benchmarks { let member = make_member::(0)?; // Set it to the max value to ensure that any possible auto-demotion period has passed. 
- frame_system::Pallet::::set_block_number(T::BlockNumber::max_value()); + frame_system::Pallet::::set_block_number(BlockNumberFor::::max_value()); ensure_evidence::(&member)?; assert!(Member::::contains_key(&member)); @@ -92,7 +92,7 @@ mod benchmarks { let member = make_member::(2)?; // Set it to the max value to ensure that any possible auto-demotion period has passed. - frame_system::Pallet::::set_block_number(T::BlockNumber::max_value()); + frame_system::Pallet::::set_block_number(BlockNumberFor::::max_value()); ensure_evidence::(&member)?; assert!(Member::::contains_key(&member)); assert_eq!(T::Members::rank_of(&member), Some(2)); diff --git a/frame/core-fellowship/src/lib.rs b/frame/core-fellowship/src/lib.rs index 97603fcc6ce9e..8f3d9633fcf22 100644 --- a/frame/core-fellowship/src/lib.rs +++ b/frame/core-fellowship/src/lib.rs @@ -65,8 +65,10 @@ use sp_std::{marker::PhantomData, prelude::*}; use frame_support::{ dispatch::DispatchResultWithPostInfo, - ensure, - traits::{tokens::Balance as BalanceTrait, EnsureOrigin, Get, RankedMembers}, + ensure, impl_ensure_origin_with_arg_ignoring_arg, + traits::{ + tokens::Balance as BalanceTrait, EnsureOrigin, EnsureOriginWithArg, Get, RankedMembers, + }, BoundedVec, RuntimeDebug, }; @@ -191,9 +193,8 @@ pub mod pallet { type EvidenceSize: Get; } - pub type ParamsOf = - ParamsType<>::Balance, ::BlockNumber, RANK_COUNT>; - pub type MemberStatusOf = MemberStatus<::BlockNumber>; + pub type ParamsOf = ParamsType<>::Balance, BlockNumberFor, RANK_COUNT>; + pub type MemberStatusOf = MemberStatus>; pub type RankOf = <>::Members as RankedMembers>::Rank; /// The overall status of the system. @@ -321,7 +322,7 @@ pub mod pallet { /// Set the parameters. /// - /// - `origin`: A origin complying with `ParamsOrigin` or root. + /// - `origin`: An origin complying with `ParamsOrigin` or root. /// - `params`: The new parameters for the pallet. 
#[pallet::weight(T::WeightInfo::set_params())] #[pallet::call_index(1)] @@ -570,7 +571,7 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin Result { - let who = frame_system::EnsureSigned::try_origin(o)?; + let who = as EnsureOrigin<_>>::try_origin(o)?; match T::Members::rank_of(&who) { Some(rank) if rank >= MIN_RANK && Member::::contains_key(&who) => Ok(who), _ => Err(frame_system::RawOrigin::Signed(who).into()), @@ -591,3 +592,9 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin, I: 'static, const MIN_RANK: u16, A } > + EnsureOriginWithArg for EnsureInducted + {} +} diff --git a/frame/core-fellowship/src/tests.rs b/frame/core-fellowship/src/tests.rs index 87c0de112ac33..c95699e66e41b 100644 --- a/frame/core-fellowship/src/tests.rs +++ b/frame/core-fellowship/src/tests.rs @@ -28,25 +28,20 @@ use frame_support::{ use frame_system::EnsureSignedBy; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup, TryMorphInto}, - DispatchError, DispatchResult, + BuildStorage, DispatchError, DispatchResult, }; use sp_std::cell::RefCell; use super::*; use crate as pallet_core_fellowship; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, CoreFellowship: pallet_core_fellowship::{Pallet, Call, Storage, Event}, } ); @@ -61,14 +56,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; 
type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -149,7 +143,7 @@ impl Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| { let params = ParamsType { diff --git a/frame/core-fellowship/src/weights.rs b/frame/core-fellowship/src/weights.rs index 28ebeae64733e..8bbfd1a4dd81d 100644 --- a/frame/core-fellowship/src/weights.rs +++ b/frame/core-fellowship/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_core_fellowship //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_core_fellowship +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_core_fellowship. 
pub trait WeightInfo { @@ -69,8 +73,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_390_000 picoseconds. - Weight::from_parts(10_847_000, 0) + // Minimum execution time: 9_454_000 picoseconds. + Weight::from_parts(9_804_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: CoreFellowship Member (r:1 w:1) @@ -87,10 +91,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn bump_offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `16854` + // Measured: `16887` // Estimated: `19894` - // Minimum execution time: 61_737_000 picoseconds. - Weight::from_parts(62_207_000, 19894) + // Minimum execution time: 58_489_000 picoseconds. + Weight::from_parts(60_202_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -108,10 +112,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn bump_demote() -> Weight { // Proof Size summary in bytes: - // Measured: `16964` + // Measured: `16997` // Estimated: `19894` - // Minimum execution time: 64_226_000 picoseconds. - Weight::from_parts(64_678_000, 19894) + // Minimum execution time: 60_605_000 picoseconds. + Weight::from_parts(63_957_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -121,10 +125,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn set_active() -> Weight { // Proof Size summary in bytes: - // Measured: `355` + // Measured: `388` // Estimated: `3514` - // Minimum execution time: 18_977_000 picoseconds. 
- Weight::from_parts(19_157_000, 3514) + // Minimum execution time: 17_816_000 picoseconds. + Weight::from_parts(18_524_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -140,10 +144,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) fn induct() -> Weight { // Proof Size summary in bytes: - // Measured: `113` + // Measured: `146` // Estimated: `3514` - // Minimum execution time: 28_633_000 picoseconds. - Weight::from_parts(29_074_000, 3514) + // Minimum execution time: 27_249_000 picoseconds. + Weight::from_parts(28_049_000, 3514) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -163,10 +167,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) fn promote() -> Weight { // Proof Size summary in bytes: - // Measured: `16832` + // Measured: `16865` // Estimated: `19894` - // Minimum execution time: 59_008_000 picoseconds. - Weight::from_parts(59_690_000, 19894) + // Minimum execution time: 56_642_000 picoseconds. + Weight::from_parts(59_353_000, 19894) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -178,10 +182,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `326` + // Measured: `359` // Estimated: `3514` - // Minimum execution time: 18_892_000 picoseconds. - Weight::from_parts(19_095_000, 3514) + // Minimum execution time: 17_459_000 picoseconds. 
+ Weight::from_parts(18_033_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -191,10 +195,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) fn import() -> Weight { // Proof Size summary in bytes: - // Measured: `280` + // Measured: `313` // Estimated: `3514` - // Minimum execution time: 18_107_000 picoseconds. - Weight::from_parts(18_371_000, 3514) + // Minimum execution time: 16_728_000 picoseconds. + Weight::from_parts(17_263_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -206,10 +210,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn approve() -> Weight { // Proof Size summary in bytes: - // Measured: `16810` + // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 43_243_000 picoseconds. - Weight::from_parts(43_965_000, 19894) + // Minimum execution time: 41_487_000 picoseconds. + Weight::from_parts(43_459_000, 19894) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -221,8 +225,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 27_881_000 picoseconds. - Weight::from_parts(28_208_000, 19894) + // Minimum execution time: 26_033_000 picoseconds. + Weight::from_parts(26_612_000, 19894) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -236,8 +240,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_390_000 picoseconds. - Weight::from_parts(10_847_000, 0) + // Minimum execution time: 9_454_000 picoseconds. 
+ Weight::from_parts(9_804_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: CoreFellowship Member (r:1 w:1) @@ -254,10 +258,10 @@ impl WeightInfo for () { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn bump_offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `16854` + // Measured: `16887` // Estimated: `19894` - // Minimum execution time: 61_737_000 picoseconds. - Weight::from_parts(62_207_000, 19894) + // Minimum execution time: 58_489_000 picoseconds. + Weight::from_parts(60_202_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -275,10 +279,10 @@ impl WeightInfo for () { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn bump_demote() -> Weight { // Proof Size summary in bytes: - // Measured: `16964` + // Measured: `16997` // Estimated: `19894` - // Minimum execution time: 64_226_000 picoseconds. - Weight::from_parts(64_678_000, 19894) + // Minimum execution time: 60_605_000 picoseconds. + Weight::from_parts(63_957_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -288,10 +292,10 @@ impl WeightInfo for () { /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn set_active() -> Weight { // Proof Size summary in bytes: - // Measured: `355` + // Measured: `388` // Estimated: `3514` - // Minimum execution time: 18_977_000 picoseconds. - Weight::from_parts(19_157_000, 3514) + // Minimum execution time: 17_816_000 picoseconds. 
+ Weight::from_parts(18_524_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -307,10 +311,10 @@ impl WeightInfo for () { /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) fn induct() -> Weight { // Proof Size summary in bytes: - // Measured: `113` + // Measured: `146` // Estimated: `3514` - // Minimum execution time: 28_633_000 picoseconds. - Weight::from_parts(29_074_000, 3514) + // Minimum execution time: 27_249_000 picoseconds. + Weight::from_parts(28_049_000, 3514) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -330,10 +334,10 @@ impl WeightInfo for () { /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) fn promote() -> Weight { // Proof Size summary in bytes: - // Measured: `16832` + // Measured: `16865` // Estimated: `19894` - // Minimum execution time: 59_008_000 picoseconds. - Weight::from_parts(59_690_000, 19894) + // Minimum execution time: 56_642_000 picoseconds. + Weight::from_parts(59_353_000, 19894) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -345,10 +349,10 @@ impl WeightInfo for () { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `326` + // Measured: `359` // Estimated: `3514` - // Minimum execution time: 18_892_000 picoseconds. - Weight::from_parts(19_095_000, 3514) + // Minimum execution time: 17_459_000 picoseconds. 
+ Weight::from_parts(18_033_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -358,10 +362,10 @@ impl WeightInfo for () { /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) fn import() -> Weight { // Proof Size summary in bytes: - // Measured: `280` + // Measured: `313` // Estimated: `3514` - // Minimum execution time: 18_107_000 picoseconds. - Weight::from_parts(18_371_000, 3514) + // Minimum execution time: 16_728_000 picoseconds. + Weight::from_parts(17_263_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -373,10 +377,10 @@ impl WeightInfo for () { /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) fn approve() -> Weight { // Proof Size summary in bytes: - // Measured: `16810` + // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 43_243_000 picoseconds. - Weight::from_parts(43_965_000, 19894) + // Minimum execution time: 41_487_000 picoseconds. + Weight::from_parts(43_459_000, 19894) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -388,8 +392,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 27_881_000 picoseconds. - Weight::from_parts(28_208_000, 19894) + // Minimum execution time: 26_033_000 picoseconds. 
+ Weight::from_parts(26_612_000, 19894) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index 1f46bfaed8c0b..f29af914295fe 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -13,18 +13,18 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", features = ["derive"], optional = true } +serde = { version = "1.0.163", features = ["derive"], optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } log = { version = "0.4.17", default-features = false } [dev-dependencies] @@ -45,11 +45,24 @@ std = [ 
"sp-runtime/std", "sp-std/std", "sp-core/std", + "pallet-balances/std", + "pallet-preimage/std", + "pallet-scheduler/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-preimage/runtime-benchmarks", + "pallet-scheduler/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-preimage/try-runtime", + "pallet-scheduler/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime",] diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index 9a67ba698a662..e4a21a4e1d9b8 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -24,7 +24,7 @@ use frame_support::{ assert_noop, assert_ok, traits::{Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable}, }; -use frame_system::RawOrigin; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use sp_core::H256; use sp_runtime::{traits::Bounded, BoundedVec}; @@ -258,7 +258,7 @@ benchmarks! { .collect::>() .try_into() .unwrap(); - Blacklist::::insert(proposal.hash(), (T::BlockNumber::zero(), addresses)); + Blacklist::::insert(proposal.hash(), (BlockNumberFor::::zero(), addresses)); }: _(origin, proposal) verify { // External proposal created @@ -332,7 +332,7 @@ benchmarks! { vetoers.try_push(account::("vetoer", i, SEED)).unwrap(); } vetoers.sort(); - Blacklist::::insert(proposal_hash, (T::BlockNumber::zero(), vetoers)); + Blacklist::::insert(proposal_hash, (BlockNumberFor::::zero(), vetoers)); let origin = T::VetoOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; ensure!(NextExternal::::get().is_some(), "no external proposal"); @@ -816,7 +816,7 @@ benchmarks! { // create not ongoing referendum. 
ReferendumInfoOf::::insert( 0, - ReferendumInfo::Finished { end: T::BlockNumber::zero(), approved: true }, + ReferendumInfo::Finished { end: BlockNumberFor::::zero(), approved: true }, ); let owner = MetadataOwner::Referendum(0); let caller = funded_account::("caller", 0); @@ -833,7 +833,7 @@ benchmarks! { // create not ongoing referendum. ReferendumInfoOf::::insert( 0, - ReferendumInfo::Finished { end: T::BlockNumber::zero(), approved: true }, + ReferendumInfo::Finished { end: BlockNumberFor::::zero(), approved: true }, ); let owner = MetadataOwner::Referendum(0); let hash = note_preimage::(); diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 370559b10a56a..e538d31c6ad03 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -165,7 +165,7 @@ use frame_support::{ }, weights::Weight, }; -use frame_system::pallet_prelude::OriginFor; +use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; use sp_runtime::{ traits::{Bounded as ArithBounded, One, Saturating, StaticLookup, Zero}, ArithmeticError, DispatchError, DispatchResult, @@ -195,7 +195,7 @@ pub mod benchmarking; pub mod migrations; -const DEMOCRACY_ID: LockIdentifier = *b"democrac"; +pub(crate) const DEMOCRACY_ID: LockIdentifier = *b"democrac"; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -226,14 +226,14 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The Scheduler. - type Scheduler: ScheduleNamed, Self::PalletsOrigin>; + type Scheduler: ScheduleNamed, CallOf, Self::PalletsOrigin>; /// The Preimage provider. type Preimages: QueryPreimage + StorePreimage; /// Currency type for this pallet. type Currency: ReservableCurrency - + LockableCurrency; + + LockableCurrency>; /// The period between a proposal being approved and enacted. /// @@ -241,22 +241,22 @@ pub mod pallet { /// voting stakers have an opportunity to remove themselves from the system in the case /// where they are on the losing side of a vote. 
#[pallet::constant] - type EnactmentPeriod: Get; + type EnactmentPeriod: Get>; /// How often (in blocks) new public referenda are launched. #[pallet::constant] - type LaunchPeriod: Get; + type LaunchPeriod: Get>; /// How often (in blocks) to check for new votes. #[pallet::constant] - type VotingPeriod: Get; + type VotingPeriod: Get>; /// The minimum period of vote locking. /// /// It should be no shorter than enactment period to ensure that in the case of an approval, /// those successful voters are locked into the consequences that their votes entail. #[pallet::constant] - type VoteLockingPeriod: Get; + type VoteLockingPeriod: Get>; /// The minimum amount to be used as a deposit for a public referendum proposal. #[pallet::constant] @@ -270,11 +270,11 @@ pub mod pallet { /// Minimum voting period allowed for a fast-track referendum. #[pallet::constant] - type FastTrackVotingPeriod: Get; + type FastTrackVotingPeriod: Get>; /// Period in blocks where an external proposal may not be re-submitted after being vetoed. #[pallet::constant] - type CooloffPeriod: Get; + type CooloffPeriod: Get>; /// The maximum number of votes for an account. /// @@ -387,7 +387,7 @@ pub mod pallet { _, Twox64Concat, ReferendumIndex, - ReferendumInfo, BalanceOf>, + ReferendumInfo, BoundedCallOf, BalanceOf>, >; /// All votes for a particular voter. We store the balance for the number of votes that we @@ -399,7 +399,7 @@ pub mod pallet { _, Twox64Concat, T::AccountId, - Voting, T::AccountId, T::BlockNumber, T::MaxVotes>, + Voting, T::AccountId, BlockNumberFor, T::MaxVotes>, ValueQuery, >; @@ -422,7 +422,7 @@ pub mod pallet { _, Identity, H256, - (T::BlockNumber, BoundedVec), + (BlockNumberFor, BoundedVec), >; /// Record of all proposals that have been subject to emergency cancellation. 
@@ -441,11 +441,12 @@ pub mod pallet { #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { - _phantom: sp_std::marker::PhantomData, + #[serde(skip)] + _config: sp_std::marker::PhantomData, } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { PublicPropCount::::put(0 as PropIndex); ReferendumCount::::put(0 as ReferendumIndex); @@ -475,7 +476,7 @@ pub mod pallet { /// An account has cancelled a previous delegation operation. Undelegated { account: T::AccountId }, /// An external proposal has been vetoed. - Vetoed { who: T::AccountId, proposal_hash: H256, until: T::BlockNumber }, + Vetoed { who: T::AccountId, proposal_hash: H256, until: BlockNumberFor }, /// A proposal_hash has been blacklisted permanently. Blacklisted { proposal_hash: H256 }, /// An account has voted in a referendum @@ -565,7 +566,7 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { /// Weight: see `begin_block` - fn on_initialize(n: T::BlockNumber) -> Weight { + fn on_initialize(n: BlockNumberFor) -> Weight { Self::begin_block(n) } } @@ -775,8 +776,8 @@ pub mod pallet { pub fn fast_track( origin: OriginFor, proposal_hash: H256, - voting_period: T::BlockNumber, - delay: T::BlockNumber, + voting_period: BlockNumberFor, + delay: BlockNumberFor, ) -> DispatchResult { // Rather complicated bit of code to ensure that either: // - `voting_period` is at least `FastTrackVotingPeriod` and `origin` is @@ -794,7 +795,7 @@ pub mod pallet { ensure!(T::InstantAllowed::get(), Error::::InstantNotAllowed); } - ensure!(voting_period > T::BlockNumber::zero(), Error::::VotingPeriodLow); + ensure!(voting_period > Zero::zero(), Error::::VotingPeriodLow); let (ext_proposal, threshold) = >::get().ok_or(Error::::ProposalMissing)?; ensure!( @@ -1047,7 +1048,8 @@ pub mod pallet { T::BlacklistOrigin::ensure_origin(origin)?; // Insert the proposal into the blacklist. 
- let permanent = (T::BlockNumber::max_value(), BoundedVec::::default()); + let permanent = + (BlockNumberFor::::max_value(), BoundedVec::::default()); Blacklist::::insert(&proposal_hash, permanent); // Remove the queued proposal, if it's there. @@ -1200,17 +1202,19 @@ impl Pallet { /// Get all referenda ready for tally at block `n`. pub fn maturing_referenda_at( - n: T::BlockNumber, - ) -> Vec<(ReferendumIndex, ReferendumStatus, BalanceOf>)> { + n: BlockNumberFor, + ) -> Vec<(ReferendumIndex, ReferendumStatus, BoundedCallOf, BalanceOf>)> + { let next = Self::lowest_unbaked(); let last = Self::referendum_count(); Self::maturing_referenda_at_inner(n, next..last) } fn maturing_referenda_at_inner( - n: T::BlockNumber, + n: BlockNumberFor, range: core::ops::Range, - ) -> Vec<(ReferendumIndex, ReferendumStatus, BalanceOf>)> { + ) -> Vec<(ReferendumIndex, ReferendumStatus, BoundedCallOf, BalanceOf>)> + { range .into_iter() .map(|i| (i, Self::referendum_info(i))) @@ -1228,7 +1232,7 @@ impl Pallet { pub fn internal_start_referendum( proposal: BoundedCallOf, threshold: VoteThreshold, - delay: T::BlockNumber, + delay: BlockNumberFor, ) -> ReferendumIndex { >::inject_referendum( >::block_number().saturating_add(T::VotingPeriod::get()), @@ -1249,8 +1253,9 @@ impl Pallet { /// Ok if the given referendum is active, Err otherwise fn ensure_ongoing( - r: ReferendumInfo, BalanceOf>, - ) -> Result, BalanceOf>, DispatchError> { + r: ReferendumInfo, BoundedCallOf, BalanceOf>, + ) -> Result, BoundedCallOf, BalanceOf>, DispatchError> + { match r { ReferendumInfo::Ongoing(s) => Ok(s), _ => Err(Error::::ReferendumInvalid.into()), @@ -1259,7 +1264,8 @@ impl Pallet { fn referendum_status( ref_index: ReferendumIndex, - ) -> Result, BalanceOf>, DispatchError> { + ) -> Result, BoundedCallOf, BalanceOf>, DispatchError> + { let info = ReferendumInfoOf::::get(ref_index).ok_or(Error::::ReferendumInvalid)?; Self::ensure_ongoing(info) } @@ -1514,10 +1520,10 @@ impl Pallet { /// Start a referendum fn 
inject_referendum( - end: T::BlockNumber, + end: BlockNumberFor, proposal: BoundedCallOf, threshold: VoteThreshold, - delay: T::BlockNumber, + delay: BlockNumberFor, ) -> ReferendumIndex { let ref_index = Self::referendum_count(); ReferendumCount::::put(ref_index + 1); @@ -1530,7 +1536,7 @@ impl Pallet { } /// Table the next waiting proposal for a vote. - fn launch_next(now: T::BlockNumber) -> DispatchResult { + fn launch_next(now: BlockNumberFor) -> DispatchResult { if LastTabledWasExternal::::take() { Self::launch_public(now).or_else(|_| Self::launch_external(now)) } else { @@ -1540,7 +1546,7 @@ impl Pallet { } /// Table the waiting external proposal for a vote, if there is one. - fn launch_external(now: T::BlockNumber) -> DispatchResult { + fn launch_external(now: BlockNumberFor) -> DispatchResult { if let Some((proposal, threshold)) = >::take() { LastTabledWasExternal::::put(true); Self::deposit_event(Event::::ExternalTabled); @@ -1558,7 +1564,7 @@ impl Pallet { } /// Table the waiting public proposal with the highest backing for a vote. - fn launch_public(now: T::BlockNumber) -> DispatchResult { + fn launch_public(now: BlockNumberFor) -> DispatchResult { let mut public_props = Self::public_props(); if let Some((winner_index, _)) = public_props.iter().enumerate().max_by_key( // defensive only: All current public proposals have an amount locked @@ -1591,9 +1597,9 @@ impl Pallet { } fn bake_referendum( - now: T::BlockNumber, + now: BlockNumberFor, index: ReferendumIndex, - status: ReferendumStatus, BalanceOf>, + status: ReferendumStatus, BoundedCallOf, BalanceOf>, ) -> bool { let total_issuance = T::Currency::total_issuance(); let approved = status.threshold.approved(status.tally, total_issuance); @@ -1628,7 +1634,7 @@ impl Pallet { /// ## Complexity: /// If a referendum is launched or maturing, this will take full block weight if queue is not /// empty. Otherwise, `O(R)` where `R` is the number of unbaked referenda. 
- fn begin_block(now: T::BlockNumber) -> Weight { + fn begin_block(now: BlockNumberFor) -> Weight { let max_block_weight = T::BlockWeights::get().max_block; let mut weight = Weight::zero(); diff --git a/frame/democracy/src/migrations/mod.rs b/frame/democracy/src/migrations/mod.rs new file mode 100644 index 0000000000000..3f77e03884edd --- /dev/null +++ b/frame/democracy/src/migrations/mod.rs @@ -0,0 +1,22 @@ +// This file is part of Substrate. +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! All migrations of this pallet. + +/// Migration to unlock and unreserve all pallet funds. +pub mod unlock_and_unreserve_all_funds; + +/// V1 storage migrations for the preimage pallet. +pub mod v1; diff --git a/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs b/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs new file mode 100644 index 0000000000000..188c475f64d0e --- /dev/null +++ b/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs @@ -0,0 +1,430 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A migration that unreserves all deposit and unlocks all stake held in the context of this +//! pallet. + +use crate::{PropIndex, Voting, DEMOCRACY_ID}; +use core::iter::Sum; +use frame_support::{ + pallet_prelude::ValueQuery, + storage_alias, + traits::{Currency, LockableCurrency, OnRuntimeUpgrade, ReservableCurrency}, + weights::RuntimeDbWeight, + Parameter, Twox64Concat, +}; +use sp_core::Get; +use sp_runtime::{traits::Zero, BoundedVec, Saturating}; +use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; + +const LOG_TARGET: &str = "runtime::democracy::migrations::unlock_and_unreserve_all_funds"; + +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +/// The configuration for [`UnlockAndUnreserveAllFunds`]. +pub trait UnlockConfig: 'static { + /// The account ID used in the runtime. + type AccountId: Parameter + Ord; + /// The currency type used in the runtime. + /// + /// Should match the currency type previously used for the pallet, if applicable. + type Currency: LockableCurrency + ReservableCurrency; + /// The name of the pallet as previously configured in + /// [`construct_runtime!`](frame_support::construct_runtime). + type PalletName: Get<&'static str>; + /// The maximum number of votes as configured previously in the runtime. + type MaxVotes: Get; + /// The maximum deposit as configured previously in the runtime. + type MaxDeposits: Get; + /// The DB weight as configured in the runtime to calculate the correct weight. + type DbWeight: Get; + /// The block number as configured in the runtime. 
+ type BlockNumber: Parameter + Zero + Copy + Ord; +} + +#[storage_alias(dynamic)] +type DepositOf = StorageMap< + ::PalletName, + Twox64Concat, + PropIndex, + (BoundedVec<::AccountId, ::MaxDeposits>, BalanceOf), +>; + +#[storage_alias(dynamic)] +type VotingOf = StorageMap< + ::PalletName, + Twox64Concat, + ::AccountId, + Voting< + BalanceOf, + ::AccountId, + ::BlockNumber, + ::MaxVotes, + >, + ValueQuery, +>; + +/// A migration that unreserves all deposit and unlocks all stake held in the context of this +/// pallet. +/// +/// Useful to prevent funds from being locked up when the pallet is being deprecated. +/// +/// The pallet should be made inoperable before this migration is run. +/// +/// (See also [`RemovePallet`][frame_support::migrations::RemovePallet]) +pub struct UnlockAndUnreserveAllFunds(sp_std::marker::PhantomData); + +impl UnlockAndUnreserveAllFunds { + /// Calculates and returns the total amounts reserved by each account by this pallet, and all + /// accounts with locks in the context of this pallet. + /// + /// There is no need to return the amount locked, because the entire lock is removed (always + /// should be zero post-migration). We need to return the amounts reserved to check that the + /// reserved amount is deducted correctly. + /// + /// # Returns + /// + /// This function returns a tuple of two `BTreeMap` collections and the weight of the reads: + /// + /// * `BTreeMap>`: Map of account IDs to their respective total + /// reserved balance by this pallet + /// * `BTreeMap>`: Map of account IDs to their respective total + /// locked balance by this pallet + /// * `frame_support::weights::Weight`: the weight consumed by this call. + fn get_account_deposits_and_locks() -> ( + BTreeMap>, + BTreeMap>, + frame_support::weights::Weight, + ) { + let mut deposit_of_len = 0; + + // Get all deposits (reserved). 
+ let mut total_voting_vec_entries: u64 = 0; + let account_deposits: BTreeMap> = DepositOf::::iter() + .flat_map(|(_prop_index, (accounts, balance))| { + // Count the number of deposits + deposit_of_len.saturating_inc(); + + // Track the total number of vec entries to calculate the weight of the reads. + total_voting_vec_entries.saturating_accrue(accounts.len() as u64); + + // Create a vec of tuples where each account is associated with the given balance + accounts.into_iter().map(|account| (account, balance)).collect::>() + }) + .fold(BTreeMap::new(), |mut acc, (account, balance)| { + // Add the balance to the account's existing balance in the accumulator + acc.entry(account.clone()).or_insert(Zero::zero()).saturating_accrue(balance); + acc + }); + + // Voter accounts have amounts locked. + let account_stakes: BTreeMap> = VotingOf::::iter() + .map(|(account_id, voting)| (account_id, voting.locked_balance())) + .collect(); + let voting_of_len = account_stakes.len() as u64; + + ( + account_deposits, + account_stakes, + T::DbWeight::get().reads( + deposit_of_len.saturating_add(voting_of_len).saturating_add( + // Max items in a Voting enum is MaxVotes + 5 + total_voting_vec_entries + .saturating_mul(T::MaxVotes::get().saturating_add(5) as u64), + ), + ), + ) + } +} + +impl OnRuntimeUpgrade for UnlockAndUnreserveAllFunds +where + BalanceOf: Sum, +{ + /// Collects pre-migration data useful for validating the migration was successful, and also + /// checks the integrity of deposited and reserved balances. + /// + /// Steps: + /// 1. Gets the deposited balances for each account stored in this pallet. + /// 2. Collects actual pre-migration reserved balances for each account. + /// 3. Checks the integrity of the deposited balances. + /// 4. Prints summary statistics about the state to be migrated. + /// 5. Encodes and returns pre-migration data to be used in post_upgrade. 
+ /// + /// Fails with a `TryRuntimeError` if somehow the amount reserved by this pallet is greater than + /// the actual total reserved amount for any accounts. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use codec::Encode; + use sp_std::collections::btree_set::BTreeSet; + + // Get staked and deposited balances as reported by this pallet. + let (account_deposits, account_locks, _) = Self::get_account_deposits_and_locks(); + + let all_accounts = account_deposits + .keys() + .chain(account_locks.keys()) + .cloned() + .collect::>(); + let account_reserved_before: BTreeMap> = account_deposits + .keys() + .map(|account| (account.clone(), T::Currency::reserved_balance(&account))) + .collect(); + + // Total deposited for each account *should* be less than or equal to the total reserved, + // however this does not hold for all cases due to bugs in the reserve logic of this pallet. + let bugged_deposits = all_accounts + .iter() + .filter(|account| { + account_deposits.get(&account).unwrap_or(&Zero::zero()) > + account_reserved_before.get(&account).unwrap_or(&Zero::zero()) + }) + .count(); + + let total_deposits_to_unreserve = + account_deposits.clone().into_values().sum::>(); + let total_stake_to_unlock = account_locks.clone().into_values().sum::>(); + + log::info!(target: LOG_TARGET, "Total accounts: {:?}", all_accounts.len()); + log::info!(target: LOG_TARGET, "Total stake to unlock: {:?}", total_stake_to_unlock); + log::info!( + target: LOG_TARGET, + "Total deposit to unreserve: {:?}", + total_deposits_to_unreserve + ); + log::info!( + target: LOG_TARGET, + "Bugged deposits: {}/{}", + bugged_deposits, + account_deposits.len() + ); + + Ok(account_reserved_before.encode()) + } + + /// Executes the migration. + /// + /// Steps: + /// 1. Retrieves the deposit and accounts with locks for the pallet. + /// 2. Unreserves the deposited funds for each account. + /// 3. Unlocks the staked funds for each account. 
+ fn on_runtime_upgrade() -> frame_support::weights::Weight { + // Get staked and deposited balances as reported by this pallet. + let (account_deposits, account_stakes, initial_reads) = + Self::get_account_deposits_and_locks(); + + // Deposited funds need to be unreserved. + for (account, unreserve_amount) in account_deposits.iter() { + if unreserve_amount.is_zero() { + log::warn!(target: LOG_TARGET, "Unexpected zero amount to unreserve!"); + continue + } + T::Currency::unreserve(&account, *unreserve_amount); + } + + // Staked funds need to be unlocked. + for account in account_stakes.keys() { + T::Currency::remove_lock(DEMOCRACY_ID, account); + } + + T::DbWeight::get() + .reads_writes( + account_stakes.len().saturating_add(account_deposits.len()) as u64, + account_stakes.len().saturating_add(account_deposits.len()) as u64, + ) + .saturating_add(initial_reads) + } + + /// Performs post-upgrade sanity checks: + /// + /// 1. No locks remain for this pallet in Balances. + /// 2. The reserved balance for each account has been reduced by the expected amount. + #[cfg(feature = "try-runtime")] + fn post_upgrade( + account_reserved_before_bytes: Vec, + ) -> Result<(), sp_runtime::TryRuntimeError> { + use codec::Decode; + + let account_reserved_before = + BTreeMap::>::decode(&mut &account_reserved_before_bytes[..]) + .map_err(|_| "Failed to decode account_reserved_before_bytes")?; + + // Get staked and deposited balances as reported by this pallet. + let (account_deposits, _, _) = Self::get_account_deposits_and_locks(); + + // Check that the reserved balance is reduced by the expected deposited amount. 
+ for (account, actual_reserved_before) in account_reserved_before { + let actual_reserved_after = T::Currency::reserved_balance(&account); + let expected_amount_deducted = *account_deposits + .get(&account) + .expect("account deposit must exist to be in pre_migration_data, qed"); + let expected_reserved_after = + actual_reserved_before.saturating_sub(expected_amount_deducted); + assert!( + actual_reserved_after == expected_reserved_after, + "Reserved balance for {:?} is incorrect. actual before: {:?}, actual after, {:?}, expected deducted: {:?}", + account, + actual_reserved_before, + actual_reserved_after, + expected_amount_deducted, + ); + } + + Ok(()) + } +} + +#[cfg(all(feature = "try-runtime", test))] +mod test { + use super::*; + use crate::{ + tests::{new_test_ext, Balances, Test}, + DepositOf, Voting, VotingOf, + }; + use frame_support::{ + assert_ok, parameter_types, + traits::{Currency, OnRuntimeUpgrade, ReservableCurrency, WithdrawReasons}, + BoundedVec, + }; + use frame_system::pallet_prelude::BlockNumberFor; + use sp_core::ConstU32; + + parameter_types! { + const PalletName: &'static str = "Democracy"; + } + + struct UnlockConfigImpl; + + impl super::UnlockConfig for UnlockConfigImpl { + type Currency = Balances; + type MaxVotes = ConstU32<100>; + type MaxDeposits = ConstU32<1000>; + type AccountId = u64; + type BlockNumber = BlockNumberFor; + type DbWeight = (); + type PalletName = PalletName; + } + + #[test] + fn unreserve_works_for_depositer() { + let depositer_0 = 10; + let depositer_1 = 11; + let deposit = 25; + let depositer_0_initial_reserved = 0; + let depositer_1_initial_reserved = 15; + let initial_balance = 100_000; + new_test_ext().execute_with(|| { + // Set up initial state. 
+ ::Currency::make_free_balance_be(&depositer_0, initial_balance); + ::Currency::make_free_balance_be(&depositer_1, initial_balance); + assert_ok!(::Currency::reserve( + &depositer_0, + depositer_0_initial_reserved + deposit + )); + assert_ok!(::Currency::reserve( + &depositer_1, + depositer_1_initial_reserved + deposit + )); + let depositors = + BoundedVec::<_, ::MaxDeposits>::truncate_from(vec![ + depositer_0, + depositer_1, + ]); + DepositOf::::insert(0, (depositors, deposit)); + + // Sanity check: ensure initial reserved balance was set correctly. + assert_eq!( + ::Currency::reserved_balance(&depositer_0), + depositer_0_initial_reserved + deposit + ); + assert_eq!( + ::Currency::reserved_balance(&depositer_1), + depositer_1_initial_reserved + deposit + ); + + // Run the migration. + let bytes = UnlockAndUnreserveAllFunds::::pre_upgrade() + .unwrap_or_else(|e| panic!("pre_upgrade failed: {:?}", e)); + UnlockAndUnreserveAllFunds::::on_runtime_upgrade(); + assert_ok!(UnlockAndUnreserveAllFunds::::post_upgrade(bytes)); + + // Assert the reserved balance was reduced by the expected amount. + assert_eq!( + ::Currency::reserved_balance(&depositer_0), + depositer_0_initial_reserved + ); + assert_eq!( + ::Currency::reserved_balance(&depositer_1), + depositer_1_initial_reserved + ); + }); + } + + #[test] + fn unlock_works_for_voter() { + let voter = 10; + let stake = 25; + let initial_locks = vec![(b"somethin", 10)]; + let initial_balance = 100_000; + new_test_ext().execute_with(|| { + // Set up initial state. + ::Currency::make_free_balance_be(&voter, initial_balance); + for lock in initial_locks.clone() { + ::Currency::set_lock( + *lock.0, + &voter, + lock.1, + WithdrawReasons::all(), + ); + } + VotingOf::::insert(voter, Voting::default()); + ::Currency::set_lock( + DEMOCRACY_ID, + &voter, + stake, + WithdrawReasons::all(), + ); + + // Sanity check: ensure initial Balance state was set up correctly. 
+ let mut voter_all_locks = initial_locks.clone(); + voter_all_locks.push((&DEMOCRACY_ID, stake)); + assert_eq!( + ::Currency::locks(&voter) + .iter() + .map(|lock| (&lock.id, lock.amount)) + .collect::>(), + voter_all_locks + ); + + // Run the migration. + let bytes = UnlockAndUnreserveAllFunds::::pre_upgrade() + .unwrap_or_else(|e| panic!("pre_upgrade failed: {:?}", e)); + UnlockAndUnreserveAllFunds::::on_runtime_upgrade(); + assert_ok!(UnlockAndUnreserveAllFunds::::post_upgrade(bytes)); + + // Assert the voter lock was removed + assert_eq!( + ::Currency::locks(&voter) + .iter() + .map(|lock| (&lock.id, lock.amount)) + .collect::>(), + initial_locks + ); + }); + } +} diff --git a/frame/democracy/src/migrations.rs b/frame/democracy/src/migrations/v1.rs similarity index 89% rename from frame/democracy/src/migrations.rs rename to frame/democracy/src/migrations/v1.rs index fe2e445bd02a6..27a500a615cff 100644 --- a/frame/democracy/src/migrations.rs +++ b/frame/democracy/src/migrations/v1.rs @@ -17,8 +17,9 @@ //! Storage migrations for the preimage pallet. -use super::*; +use crate::*; use frame_support::{pallet_prelude::*, storage_alias, traits::OnRuntimeUpgrade, BoundedVec}; +use frame_system::pallet_prelude::BlockNumberFor; use sp_core::H256; /// The log target. 
@@ -45,11 +46,7 @@ mod v0 { Pallet, frame_support::Twox64Concat, ReferendumIndex, - ReferendumInfo< - ::BlockNumber, - ::Hash, - BalanceOf, - >, + ReferendumInfo, ::Hash, BalanceOf>, >; } @@ -61,12 +58,12 @@ pub mod v1 { impl> OnRuntimeUpgrade for Migration { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - assert_eq!(StorageVersion::get::>(), 0, "can only upgrade from version 0"); + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + ensure!(StorageVersion::get::>() == 0, "can only upgrade from version 0"); let props_count = v0::PublicProps::::get().len(); log::info!(target: TARGET, "{} public proposals will be migrated.", props_count,); - ensure!(props_count <= T::MaxProposals::get() as usize, "too many proposals"); + ensure!(props_count <= T::MaxProposals::get() as usize, Error::::TooMany); let referenda_count = v0::ReferendumInfoOf::::iter().count(); log::info!(target: TARGET, "{} referenda will be migrated.", referenda_count); @@ -87,7 +84,7 @@ pub mod v1 { } ReferendumInfoOf::::translate( - |index, old: ReferendumInfo>| { + |index, old: ReferendumInfo, T::Hash, BalanceOf>| { weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); log::info!(target: TARGET, "migrating referendum #{:?}", &index); Some(match old { @@ -133,15 +130,15 @@ pub mod v1 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - assert_eq!(StorageVersion::get::>(), 1, "must upgrade"); + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + ensure!(StorageVersion::get::>() == 1, "must upgrade"); let (old_props_count, old_ref_count): (u32, u32) = Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); let new_props_count = crate::PublicProps::::get().len() as u32; - assert_eq!(new_props_count, old_props_count, "must migrate all public proposals"); + ensure!(new_props_count == old_props_count, "must migrate all public proposals"); let new_ref_count = 
crate::ReferendumInfoOf::::iter().count() as u32; - assert_eq!(new_ref_count, old_ref_count, "must migrate all referenda"); + ensure!(new_ref_count == old_ref_count, "must migrate all referenda"); log::info!( target: TARGET, diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 06fde5129c6d0..e5cfcc5b40029 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -22,8 +22,8 @@ use crate as pallet_democracy; use frame_support::{ assert_noop, assert_ok, ord_parameter_types, parameter_types, traits::{ - ConstU32, ConstU64, Contains, EqualPrivilegeOnly, GenesisBuild, OnInitialize, - SortedMembers, StorePreimage, + ConstU32, ConstU64, Contains, EqualPrivilegeOnly, OnInitialize, SortedMembers, + StorePreimage, }, weights::Weight, }; @@ -31,9 +31,8 @@ use frame_system::{EnsureRoot, EnsureSigned, EnsureSignedBy}; use pallet_balances::{BalanceLock, Error as BalancesError}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BadOrigin, BlakeTwo256, Hash, IdentityLookup}, - Perbill, + BuildStorage, Perbill, }; mod cancellation; mod decoders; @@ -51,16 +50,12 @@ const NAY: Vote = Vote { aye: false, conviction: Conviction::None }; const BIG_AYE: Vote = Vote { aye: true, conviction: Conviction::Locked1x }; const BIG_NAY: Vote = Vote { aye: false, conviction: Conviction::Locked1x }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Preimage: pallet_preimage, Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event}, @@ -88,14 +83,13 @@ impl frame_system::Config for Test { type BlockLength = (); type 
DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -146,7 +140,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } parameter_types! { @@ -203,7 +197,7 @@ impl Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], } diff --git a/frame/democracy/src/vote.rs b/frame/democracy/src/vote.rs index c1b626fd9b7b9..b3fe9aa28e1ac 100644 --- a/frame/democracy/src/vote.rs +++ b/frame/democracy/src/vote.rs @@ -202,7 +202,7 @@ impl< .rejig(now); } - /// The amount of this account's balance that much currently be locked due to voting. + /// The amount of this account's balance that must currently be locked due to voting. pub fn locked_balance(&self) -> Balance { match self { Voting::Direct { votes, prior, .. } => diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index a263f2982d862..241f6c3cb38de 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_democracy //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_democracy +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_democracy. pub trait WeightInfo { @@ -93,8 +97,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4801` // Estimated: `18187` - // Minimum execution time: 43_810_000 picoseconds. - Weight::from_parts(44_439_000, 18187) + // Minimum execution time: 49_339_000 picoseconds. + Weight::from_parts(50_942_000, 18187) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -104,8 +108,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3556` // Estimated: `6695` - // Minimum execution time: 40_003_000 picoseconds. - Weight::from_parts(40_448_000, 6695) + // Minimum execution time: 43_291_000 picoseconds. + Weight::from_parts(44_856_000, 6695) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -121,8 +125,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3470` // Estimated: `7260` - // Minimum execution time: 54_737_000 picoseconds. - Weight::from_parts(55_154_000, 7260) + // Minimum execution time: 61_890_000 picoseconds. 
+ Weight::from_parts(63_626_000, 7260) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -138,8 +142,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3492` // Estimated: `7260` - // Minimum execution time: 59_545_000 picoseconds. - Weight::from_parts(59_955_000, 7260) + // Minimum execution time: 67_802_000 picoseconds. + Weight::from_parts(69_132_000, 7260) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -153,8 +157,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `366` // Estimated: `3666` - // Minimum execution time: 27_886_000 picoseconds. - Weight::from_parts(28_372_000, 3666) + // Minimum execution time: 25_757_000 picoseconds. + Weight::from_parts(27_226_000, 3666) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -176,8 +180,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5910` // Estimated: `18187` - // Minimum execution time: 99_273_000 picoseconds. - Weight::from_parts(100_398_000, 18187) + // Minimum execution time: 113_060_000 picoseconds. + Weight::from_parts(114_813_000, 18187) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -189,8 +193,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3416` // Estimated: `6703` - // Minimum execution time: 14_946_000 picoseconds. - Weight::from_parts(15_114_000, 6703) + // Minimum execution time: 13_413_000 picoseconds. + Weight::from_parts(13_794_000, 6703) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -200,8 +204,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_870_000 picoseconds. 
- Weight::from_parts(4_083_000, 0) + // Minimum execution time: 3_213_000 picoseconds. + Weight::from_parts(3_429_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Democracy NextExternal (r:0 w:1) @@ -210,8 +214,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_989_000 picoseconds. - Weight::from_parts(4_166_000, 0) + // Minimum execution time: 3_280_000 picoseconds. + Weight::from_parts(3_389_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Democracy NextExternal (r:1 w:1) @@ -226,8 +230,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `286` // Estimated: `3518` - // Minimum execution time: 29_776_000 picoseconds. - Weight::from_parts(30_186_000, 3518) + // Minimum execution time: 28_142_000 picoseconds. + Weight::from_parts(28_862_000, 3518) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -241,8 +245,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3519` // Estimated: `6703` - // Minimum execution time: 33_891_000 picoseconds. - Weight::from_parts(34_265_000, 6703) + // Minimum execution time: 32_395_000 picoseconds. + Weight::from_parts(33_617_000, 6703) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -258,8 +262,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5821` // Estimated: `18187` - // Minimum execution time: 81_510_000 picoseconds. - Weight::from_parts(82_483_000, 18187) + // Minimum execution time: 92_255_000 picoseconds. 
+ Weight::from_parts(93_704_000, 18187) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -271,8 +275,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `271` // Estimated: `3518` - // Minimum execution time: 21_164_000 picoseconds. - Weight::from_parts(21_624_000, 3518) + // Minimum execution time: 19_623_000 picoseconds. + Weight::from_parts(20_545_000, 3518) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -287,10 +291,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `244 + r * (86 ±0)` // Estimated: `1489 + r * (2676 ±0)` - // Minimum execution time: 6_925_000 picoseconds. - Weight::from_parts(10_624_198, 1489) - // Standard Error: 5_780 - .saturating_add(Weight::from_parts(2_934_169, 0).saturating_mul(r.into())) + // Minimum execution time: 7_032_000 picoseconds. + Weight::from_parts(7_931_421, 1489) + // Standard Error: 7_395 + .saturating_add(Weight::from_parts(3_236_964, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -313,10 +317,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `244 + r * (86 ±0)` // Estimated: `18187 + r * (2676 ±0)` - // Minimum execution time: 10_551_000 picoseconds. - Weight::from_parts(13_126_123, 18187) - // Standard Error: 6_391 - .saturating_add(Weight::from_parts(2_952_789, 0).saturating_mul(r.into())) + // Minimum execution time: 10_524_000 picoseconds. 
+ Weight::from_parts(10_369_064, 18187) + // Standard Error: 8_385 + .saturating_add(Weight::from_parts(3_242_334, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -335,10 +339,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `830 + r * (108 ±0)` // Estimated: `19800 + r * (2676 ±0)` - // Minimum execution time: 47_172_000 picoseconds. - Weight::from_parts(49_667_954, 19800) - // Standard Error: 6_129 - .saturating_add(Weight::from_parts(4_230_402, 0).saturating_mul(r.into())) + // Minimum execution time: 46_106_000 picoseconds. + Weight::from_parts(48_936_654, 19800) + // Standard Error: 8_879 + .saturating_add(Weight::from_parts(4_708_141, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -354,10 +358,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `493 + r * (108 ±0)` // Estimated: `13530 + r * (2676 ±0)` - // Minimum execution time: 22_360_000 picoseconds. - Weight::from_parts(25_063_237, 13530) - // Standard Error: 5_326 - .saturating_add(Weight::from_parts(4_163_683, 0).saturating_mul(r.into())) + // Minimum execution time: 21_078_000 picoseconds. + Weight::from_parts(22_732_737, 13530) + // Standard Error: 7_969 + .saturating_add(Weight::from_parts(4_626_458, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -370,8 +374,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_047_000 picoseconds. 
- Weight::from_parts(4_139_000, 0) + // Minimum execution time: 3_229_000 picoseconds. + Weight::from_parts(3_415_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Democracy VotingOf (r:1 w:1) @@ -387,10 +391,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `563` // Estimated: `7260` - // Minimum execution time: 27_322_000 picoseconds. - Weight::from_parts(39_909_589, 7260) - // Standard Error: 2_758 - .saturating_add(Weight::from_parts(29_497, 0).saturating_mul(r.into())) + // Minimum execution time: 25_735_000 picoseconds. + Weight::from_parts(41_341_468, 7260) + // Standard Error: 3_727 + .saturating_add(Weight::from_parts(94_755, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -407,10 +411,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `564 + r * (22 ±0)` // Estimated: `7260` - // Minimum execution time: 37_082_000 picoseconds. - Weight::from_parts(38_580_061, 7260) - // Standard Error: 664 - .saturating_add(Weight::from_parts(62_401, 0).saturating_mul(r.into())) + // Minimum execution time: 36_233_000 picoseconds. + Weight::from_parts(39_836_017, 7260) + // Standard Error: 1_791 + .saturating_add(Weight::from_parts(132_158, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -423,10 +427,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `728 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 17_528_000 picoseconds. - Weight::from_parts(20_075_412, 7260) - // Standard Error: 1_072 - .saturating_add(Weight::from_parts(81_734, 0).saturating_mul(r.into())) + // Minimum execution time: 16_081_000 picoseconds. 
+ Weight::from_parts(19_624_101, 7260) + // Standard Error: 1_639 + .saturating_add(Weight::from_parts(133_630, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -439,10 +443,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `728 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 17_517_000 picoseconds. - Weight::from_parts(20_090_718, 7260) - // Standard Error: 1_105 - .saturating_add(Weight::from_parts(82_651, 0).saturating_mul(r.into())) + // Minimum execution time: 15_634_000 picoseconds. + Weight::from_parts(19_573_407, 7260) + // Standard Error: 1_790 + .saturating_add(Weight::from_parts(139_707, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -456,8 +460,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `356` // Estimated: `3556` - // Minimum execution time: 19_234_000 picoseconds. - Weight::from_parts(19_755_000, 3556) + // Minimum execution time: 18_344_000 picoseconds. + Weight::from_parts(18_727_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -469,8 +473,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `286` // Estimated: `3518` - // Minimum execution time: 17_621_000 picoseconds. - Weight::from_parts(17_861_000, 3518) + // Minimum execution time: 16_497_000 picoseconds. + Weight::from_parts(16_892_000, 3518) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -484,8 +488,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4888` // Estimated: `18187` - // Minimum execution time: 35_785_000 picoseconds. - Weight::from_parts(36_102_000, 18187) + // Minimum execution time: 39_517_000 picoseconds. 
+ Weight::from_parts(40_632_000, 18187) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -497,8 +501,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4822` // Estimated: `18187` - // Minimum execution time: 33_493_000 picoseconds. - Weight::from_parts(33_747_000, 18187) + // Minimum execution time: 37_108_000 picoseconds. + Weight::from_parts(37_599_000, 18187) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -510,8 +514,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 15_557_000 picoseconds. - Weight::from_parts(15_844_000, 3556) + // Minimum execution time: 13_997_000 picoseconds. + Weight::from_parts(14_298_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -523,8 +527,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `302` // Estimated: `3666` - // Minimum execution time: 19_940_000 picoseconds. - Weight::from_parts(20_301_000, 3666) + // Minimum execution time: 18_122_000 picoseconds. + Weight::from_parts(18_655_000, 3666) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -544,8 +548,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4801` // Estimated: `18187` - // Minimum execution time: 43_810_000 picoseconds. - Weight::from_parts(44_439_000, 18187) + // Minimum execution time: 49_339_000 picoseconds. + Weight::from_parts(50_942_000, 18187) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -555,8 +559,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3556` // Estimated: `6695` - // Minimum execution time: 40_003_000 picoseconds. 
- Weight::from_parts(40_448_000, 6695) + // Minimum execution time: 43_291_000 picoseconds. + Weight::from_parts(44_856_000, 6695) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -572,8 +576,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3470` // Estimated: `7260` - // Minimum execution time: 54_737_000 picoseconds. - Weight::from_parts(55_154_000, 7260) + // Minimum execution time: 61_890_000 picoseconds. + Weight::from_parts(63_626_000, 7260) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -589,8 +593,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3492` // Estimated: `7260` - // Minimum execution time: 59_545_000 picoseconds. - Weight::from_parts(59_955_000, 7260) + // Minimum execution time: 67_802_000 picoseconds. + Weight::from_parts(69_132_000, 7260) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -604,8 +608,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `366` // Estimated: `3666` - // Minimum execution time: 27_886_000 picoseconds. - Weight::from_parts(28_372_000, 3666) + // Minimum execution time: 25_757_000 picoseconds. + Weight::from_parts(27_226_000, 3666) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -627,8 +631,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5910` // Estimated: `18187` - // Minimum execution time: 99_273_000 picoseconds. - Weight::from_parts(100_398_000, 18187) + // Minimum execution time: 113_060_000 picoseconds. 
+ Weight::from_parts(114_813_000, 18187) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -640,8 +644,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3416` // Estimated: `6703` - // Minimum execution time: 14_946_000 picoseconds. - Weight::from_parts(15_114_000, 6703) + // Minimum execution time: 13_413_000 picoseconds. + Weight::from_parts(13_794_000, 6703) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -651,8 +655,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_870_000 picoseconds. - Weight::from_parts(4_083_000, 0) + // Minimum execution time: 3_213_000 picoseconds. + Weight::from_parts(3_429_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Democracy NextExternal (r:0 w:1) @@ -661,8 +665,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_989_000 picoseconds. - Weight::from_parts(4_166_000, 0) + // Minimum execution time: 3_280_000 picoseconds. + Weight::from_parts(3_389_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Democracy NextExternal (r:1 w:1) @@ -677,8 +681,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `286` // Estimated: `3518` - // Minimum execution time: 29_776_000 picoseconds. - Weight::from_parts(30_186_000, 3518) + // Minimum execution time: 28_142_000 picoseconds. + Weight::from_parts(28_862_000, 3518) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -692,8 +696,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3519` // Estimated: `6703` - // Minimum execution time: 33_891_000 picoseconds. - Weight::from_parts(34_265_000, 6703) + // Minimum execution time: 32_395_000 picoseconds. 
+ Weight::from_parts(33_617_000, 6703) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -709,8 +713,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5821` // Estimated: `18187` - // Minimum execution time: 81_510_000 picoseconds. - Weight::from_parts(82_483_000, 18187) + // Minimum execution time: 92_255_000 picoseconds. + Weight::from_parts(93_704_000, 18187) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -722,8 +726,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `271` // Estimated: `3518` - // Minimum execution time: 21_164_000 picoseconds. - Weight::from_parts(21_624_000, 3518) + // Minimum execution time: 19_623_000 picoseconds. + Weight::from_parts(20_545_000, 3518) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -738,10 +742,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `244 + r * (86 ±0)` // Estimated: `1489 + r * (2676 ±0)` - // Minimum execution time: 6_925_000 picoseconds. - Weight::from_parts(10_624_198, 1489) - // Standard Error: 5_780 - .saturating_add(Weight::from_parts(2_934_169, 0).saturating_mul(r.into())) + // Minimum execution time: 7_032_000 picoseconds. + Weight::from_parts(7_931_421, 1489) + // Standard Error: 7_395 + .saturating_add(Weight::from_parts(3_236_964, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -764,10 +768,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `244 + r * (86 ±0)` // Estimated: `18187 + r * (2676 ±0)` - // Minimum execution time: 10_551_000 picoseconds. 
- Weight::from_parts(13_126_123, 18187) - // Standard Error: 6_391 - .saturating_add(Weight::from_parts(2_952_789, 0).saturating_mul(r.into())) + // Minimum execution time: 10_524_000 picoseconds. + Weight::from_parts(10_369_064, 18187) + // Standard Error: 8_385 + .saturating_add(Weight::from_parts(3_242_334, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -786,10 +790,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `830 + r * (108 ±0)` // Estimated: `19800 + r * (2676 ±0)` - // Minimum execution time: 47_172_000 picoseconds. - Weight::from_parts(49_667_954, 19800) - // Standard Error: 6_129 - .saturating_add(Weight::from_parts(4_230_402, 0).saturating_mul(r.into())) + // Minimum execution time: 46_106_000 picoseconds. + Weight::from_parts(48_936_654, 19800) + // Standard Error: 8_879 + .saturating_add(Weight::from_parts(4_708_141, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -805,10 +809,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `493 + r * (108 ±0)` // Estimated: `13530 + r * (2676 ±0)` - // Minimum execution time: 22_360_000 picoseconds. - Weight::from_parts(25_063_237, 13530) - // Standard Error: 5_326 - .saturating_add(Weight::from_parts(4_163_683, 0).saturating_mul(r.into())) + // Minimum execution time: 21_078_000 picoseconds. 
+ Weight::from_parts(22_732_737, 13530) + // Standard Error: 7_969 + .saturating_add(Weight::from_parts(4_626_458, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -821,8 +825,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_047_000 picoseconds. - Weight::from_parts(4_139_000, 0) + // Minimum execution time: 3_229_000 picoseconds. + Weight::from_parts(3_415_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Democracy VotingOf (r:1 w:1) @@ -838,10 +842,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `563` // Estimated: `7260` - // Minimum execution time: 27_322_000 picoseconds. - Weight::from_parts(39_909_589, 7260) - // Standard Error: 2_758 - .saturating_add(Weight::from_parts(29_497, 0).saturating_mul(r.into())) + // Minimum execution time: 25_735_000 picoseconds. + Weight::from_parts(41_341_468, 7260) + // Standard Error: 3_727 + .saturating_add(Weight::from_parts(94_755, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -858,10 +862,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `564 + r * (22 ±0)` // Estimated: `7260` - // Minimum execution time: 37_082_000 picoseconds. - Weight::from_parts(38_580_061, 7260) - // Standard Error: 664 - .saturating_add(Weight::from_parts(62_401, 0).saturating_mul(r.into())) + // Minimum execution time: 36_233_000 picoseconds. 
+ Weight::from_parts(39_836_017, 7260) + // Standard Error: 1_791 + .saturating_add(Weight::from_parts(132_158, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -874,10 +878,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `728 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 17_528_000 picoseconds. - Weight::from_parts(20_075_412, 7260) - // Standard Error: 1_072 - .saturating_add(Weight::from_parts(81_734, 0).saturating_mul(r.into())) + // Minimum execution time: 16_081_000 picoseconds. + Weight::from_parts(19_624_101, 7260) + // Standard Error: 1_639 + .saturating_add(Weight::from_parts(133_630, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -890,10 +894,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `728 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 17_517_000 picoseconds. - Weight::from_parts(20_090_718, 7260) - // Standard Error: 1_105 - .saturating_add(Weight::from_parts(82_651, 0).saturating_mul(r.into())) + // Minimum execution time: 15_634_000 picoseconds. + Weight::from_parts(19_573_407, 7260) + // Standard Error: 1_790 + .saturating_add(Weight::from_parts(139_707, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -907,8 +911,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `356` // Estimated: `3556` - // Minimum execution time: 19_234_000 picoseconds. - Weight::from_parts(19_755_000, 3556) + // Minimum execution time: 18_344_000 picoseconds. 
+ Weight::from_parts(18_727_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -920,8 +924,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `286` // Estimated: `3518` - // Minimum execution time: 17_621_000 picoseconds. - Weight::from_parts(17_861_000, 3518) + // Minimum execution time: 16_497_000 picoseconds. + Weight::from_parts(16_892_000, 3518) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -935,8 +939,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4888` // Estimated: `18187` - // Minimum execution time: 35_785_000 picoseconds. - Weight::from_parts(36_102_000, 18187) + // Minimum execution time: 39_517_000 picoseconds. + Weight::from_parts(40_632_000, 18187) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -948,8 +952,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4822` // Estimated: `18187` - // Minimum execution time: 33_493_000 picoseconds. - Weight::from_parts(33_747_000, 18187) + // Minimum execution time: 37_108_000 picoseconds. + Weight::from_parts(37_599_000, 18187) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -961,8 +965,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 15_557_000 picoseconds. - Weight::from_parts(15_844_000, 3556) + // Minimum execution time: 13_997_000 picoseconds. + Weight::from_parts(14_298_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -974,8 +978,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `302` // Estimated: `3666` - // Minimum execution time: 19_940_000 picoseconds. 
- Weight::from_parts(20_301_000, 3666) + // Minimum execution time: 18_122_000 picoseconds. + Weight::from_parts(18_655_000, 3666) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml index c88ed0120fe59..a971897a751b0 100644 --- a/frame/election-provider-multi-phase/Cargo.toml +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -12,7 +12,7 @@ description = "PALLET two phase election providers" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } scale-info = { version = "2.5.0", default-features = false, features = [ @@ -23,12 +23,12 @@ log = { version = "0.4.17", default-features = false } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../primitives/npos-elections" 
} -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../../primitives/arithmetic" } frame-election-provider-support = { version = "4.0.0-dev", default-features = false, path = "../election-provider-support" } # Optional imports for benchmarking @@ -40,10 +40,10 @@ strum = { version = "0.24.1", default-features = false, features = ["derive"], [dev-dependencies] parking_lot = "0.12.1" rand = "0.8.5" -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../primitives/npos-elections" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } @@ -70,11 +70,24 @@ std = [ "frame-benchmarking?/std", "rand/std", "strum/std", + "pallet-balances/std", + "sp-tracing/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-election-provider-support/runtime-benchmarks", "rand", "strum", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-election-provider-support-benchmarking?/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-election-provider-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git 
a/frame/election-provider-multi-phase/src/benchmarking.rs b/frame/election-provider-multi-phase/src/benchmarking.rs index 761e1a42ca1eb..eac92834e288c 100644 --- a/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/frame/election-provider-multi-phase/src/benchmarking.rs @@ -20,6 +20,7 @@ use super::*; use crate::{unsigned::IndexAssignmentOf, Pallet as MultiPhase}; use frame_benchmarking::account; +use frame_election_provider_support::bounds::DataProviderBounds; use frame_support::{ assert_ok, traits::{Hooks, TryCollect}, @@ -169,10 +170,12 @@ fn set_up_data_provider(v: u32, t: u32) { let mut targets = (0..t) .map(|i| { let target = frame_benchmarking::account::("Target", i, SEED); + T::DataProvider::add_target(target.clone()); target }) .collect::>(); + // we should always have enough voters to fill. assert!( targets.len() > ::MaxVotesPerVoter::get() as usize @@ -268,15 +271,16 @@ frame_benchmarking::benchmarks! { // we don't directly need the data-provider to be populated, but it is just easy to use it. set_up_data_provider::(v, t); - let targets = T::DataProvider::electable_targets(None)?; - let voters = T::DataProvider::electing_voters(None)?; + // default bounds are unbounded. + let targets = T::DataProvider::electable_targets(DataProviderBounds::default())?; + let voters = T::DataProvider::electing_voters(DataProviderBounds::default())?; let desired_targets = T::DataProvider::desired_targets()?; assert!(>::snapshot().is_none()); }: { >::create_snapshot_internal(targets, voters, desired_targets) } verify { assert!(>::snapshot().is_some()); - assert_eq!(>::snapshot_metadata().ok_or("metadata missing")?.voters, v + t); + assert_eq!(>::snapshot_metadata().ok_or("metadata missing")?.voters, v); assert_eq!(>::snapshot_metadata().ok_or("metadata missing")?.targets, t); } @@ -313,7 +317,7 @@ frame_benchmarking::benchmarks! 
{ assert!(>::get().is_none()); assert!(>::get().is_none()); assert!(>::get().is_none()); - assert_eq!(>::get(), >::Off); + assert_eq!(>::get(), >>::Off); } submit { @@ -457,7 +461,7 @@ frame_benchmarking::benchmarks! { >::create_snapshot().map_err(|_| "could not create snapshot")?; } verify { assert!(>::snapshot().is_some()); - assert_eq!(>::snapshot_metadata().ok_or("snapshot missing")?.voters, v + t); + assert_eq!(>::snapshot_metadata().ok_or("snapshot missing")?.voters, v); assert_eq!(>::snapshot_metadata().ok_or("snapshot missing")?.targets, t); } diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index e7aa4c4fe65bc..2abdfd6e77f87 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -231,8 +231,9 @@ use codec::{Decode, Encode}; use frame_election_provider_support::{ - BoundedSupportsOf, ElectionDataProvider, ElectionProvider, ElectionProviderBase, - InstantElectionProvider, NposSolution, + bounds::{CountBound, ElectionBounds, ElectionBoundsBuilder, SizeBound}, + BoundedSupportsOf, DataProviderBounds, ElectionDataProvider, ElectionProvider, + ElectionProviderBase, InstantElectionProvider, NposSolution, }; use frame_support::{ dispatch::DispatchClass, @@ -241,7 +242,7 @@ use frame_support::{ weights::Weight, DefaultNoBound, EqNoBound, PartialEqNoBound, }; -use frame_system::{ensure_none, offchain::SendTransactionTypes}; +use frame_system::{ensure_none, offchain::SendTransactionTypes, pallet_prelude::BlockNumberFor}; use scale_info::TypeInfo; use sp_arithmetic::{ traits::{CheckedAdd, Zero}, @@ -257,6 +258,9 @@ use sp_runtime::{ }; use sp_std::prelude::*; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + #[cfg(feature = "runtime-benchmarks")] mod benchmarking; #[cfg(test)] @@ -582,10 +586,10 @@ pub mod pallet { /// Duration of the unsigned phase. 
#[pallet::constant] - type UnsignedPhase: Get; + type UnsignedPhase: Get>; /// Duration of the signed phase. #[pallet::constant] - type SignedPhase: Get; + type SignedPhase: Get>; /// The minimum amount of improvement to the solution score that defines a solution as /// "better" in the Signed phase. @@ -602,7 +606,7 @@ pub mod pallet { /// For example, if it is 5, that means that at least 5 blocks will elapse between attempts /// to submit the worker's solution. #[pallet::constant] - type OffchainRepeat: Get; + type OffchainRepeat: Get>; /// The priority of the unsigned transaction submitted in the unsigned-phase #[pallet::constant] @@ -656,16 +660,6 @@ pub mod pallet { #[pallet::constant] type SignedDepositWeight: Get>; - /// The maximum number of electing voters to put in the snapshot. At the moment, snapshots - /// are only over a single block, but once multi-block elections are introduced they will - /// take place over multiple blocks. - #[pallet::constant] - type MaxElectingVoters: Get>; - - /// The maximum number of electable targets to put in the snapshot. - #[pallet::constant] - type MaxElectableTargets: Get>; - /// The maximum number of winners that can be elected by this `ElectionProvider` /// implementation. /// @@ -673,6 +667,11 @@ pub mod pallet { #[pallet::constant] type MaxWinners: Get; + /// The maximum number of electing voters and electable targets to put in the snapshot. + /// At the moment, snapshots are only over a single block, but once multi-block elections + /// are introduced they will take place over multiple blocks. + type ElectionBounds: Get; + /// Handler for the slashed deposits. type SlashHandler: OnUnbalanced>; @@ -682,13 +681,13 @@ pub mod pallet { /// Something that will provide the election data. type DataProvider: ElectionDataProvider< AccountId = Self::AccountId, - BlockNumber = Self::BlockNumber, + BlockNumber = BlockNumberFor, >; /// Configuration for the fallback. 
type Fallback: InstantElectionProvider< AccountId = Self::AccountId, - BlockNumber = Self::BlockNumber, + BlockNumber = BlockNumberFor, DataProvider = Self::DataProvider, MaxWinners = Self::MaxWinners, >; @@ -699,7 +698,7 @@ pub mod pallet { /// BoundedExecution<_>` if the test-net is not expected to have thousands of nominators. type GovernanceFallback: InstantElectionProvider< AccountId = Self::AccountId, - BlockNumber = Self::BlockNumber, + BlockNumber = BlockNumberFor, DataProvider = Self::DataProvider, MaxWinners = Self::MaxWinners, >; @@ -744,7 +743,7 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { - fn on_initialize(now: T::BlockNumber) -> Weight { + fn on_initialize(now: BlockNumberFor) -> Weight { // First we check whether there is a phase that should be forced. match >::take() { // Only start a signed phase if the current phase is not already `Signed`. @@ -831,7 +830,7 @@ pub mod pallet { } } - fn offchain_worker(now: T::BlockNumber) { + fn offchain_worker(now: BlockNumberFor) { use sp_runtime::offchain::storage_lock::{BlockAndTime, StorageLock}; // Create a lock with the maximum deadline of number of blocks in the unsigned phase. 
@@ -893,7 +892,7 @@ pub mod pallet { } #[cfg(feature = "try-runtime")] - fn try_state(_n: T::BlockNumber) -> Result<(), &'static str> { + fn try_state(_n: BlockNumberFor) -> Result<(), TryRuntimeError> { Self::do_try_state() } } @@ -1104,13 +1103,19 @@ pub mod pallet { T::ForceOrigin::ensure_origin(origin)?; ensure!(Self::current_phase().is_emergency(), >::CallNotAllowed); - let supports = - T::GovernanceFallback::instant_elect(maybe_max_voters, maybe_max_targets).map_err( - |e| { - log!(error, "GovernanceFallback failed: {:?}", e); - Error::::FallbackFailed - }, - )?; + let election_bounds = ElectionBoundsBuilder::default() + .voters_count(maybe_max_voters.unwrap_or(u32::MAX).into()) + .targets_count(maybe_max_targets.unwrap_or(u32::MAX).into()) + .build(); + + let supports = T::GovernanceFallback::instant_elect( + election_bounds.voters, + election_bounds.targets, + ) + .map_err(|e| { + log!(error, "GovernanceFallback failed: {:?}", e); + Error::::FallbackFailed + })?; // transform BoundedVec<_, T::GovernanceFallback::MaxWinners> into // `BoundedVec<_, T::MaxWinners>` @@ -1196,7 +1201,11 @@ pub mod pallet { /// An account has been slashed for submitting an invalid signed submission. Slashed { account: ::AccountId, value: BalanceOf }, /// There was a phase transition in a given round. - PhaseTransitioned { from: Phase, to: Phase, round: u32 }, + PhaseTransitioned { + from: Phase>, + to: Phase>, + round: u32, + }, } /// Error of the pallet that can be returned in response to dispatches. @@ -1300,7 +1309,7 @@ pub mod pallet { /// Current phase. #[pallet::storage] #[pallet::getter(fn current_phase)] - pub type CurrentPhase = StorageValue<_, Phase, ValueQuery>; + pub type CurrentPhase = StorageValue<_, Phase>, ValueQuery>; /// Current best solution, signed or unsigned, queued to be returned upon `elect`. 
/// @@ -1390,13 +1399,13 @@ pub mod pallet { #[pallet::pallet] #[pallet::without_storage_info] #[pallet::storage_version(STORAGE_VERSION)] - pub struct Pallet(PhantomData); + pub struct Pallet(_); } impl Pallet { /// Internal logic of the offchain worker, to be executed only when the offchain lock is /// acquired with success. - fn do_synchronized_offchain_worker(now: T::BlockNumber) { + fn do_synchronized_offchain_worker(now: BlockNumberFor) { let current_phase = Self::current_phase(); log!(trace, "lock for offchain worker acquired. Phase = {:?}", current_phase); match current_phase { @@ -1447,7 +1456,7 @@ impl Pallet { } /// Phase transition helper. - pub(crate) fn phase_transition(to: Phase) { + pub(crate) fn phase_transition(to: Phase>) { log!(info, "Starting phase {:?}, round {}.", to, Self::round()); Self::deposit_event(Event::PhaseTransitioned { from: >::get(), @@ -1494,19 +1503,28 @@ impl Pallet { /// Extracted for easier weight calculation. fn create_snapshot_external( ) -> Result<(Vec, Vec>, u32), ElectionError> { - let target_limit = T::MaxElectableTargets::get().saturated_into::(); - let voter_limit = T::MaxElectingVoters::get().saturated_into::(); + let election_bounds = T::ElectionBounds::get(); - let targets = T::DataProvider::electable_targets(Some(target_limit)) + let targets = T::DataProvider::electable_targets(election_bounds.targets) + .and_then(|t| { + election_bounds.ensure_targets_limits( + CountBound(t.len() as u32), + SizeBound(t.encoded_size() as u32), + )?; + Ok(t) + }) .map_err(ElectionError::DataProvider)?; - let voters = T::DataProvider::electing_voters(Some(voter_limit)) + let voters = T::DataProvider::electing_voters(election_bounds.voters) + .and_then(|v| { + election_bounds.ensure_voters_limits( + CountBound(v.len() as u32), + SizeBound(v.encoded_size() as u32), + )?; + Ok(v) + }) .map_err(ElectionError::DataProvider)?; - if targets.len() > target_limit || voters.len() > voter_limit { - return 
Err(ElectionError::DataProvider("Snapshot too big for submission.")) - } - let mut desired_targets = as ElectionProviderBase>::desired_targets_checked() .map_err(|e| ElectionError::DataProvider(e))?; @@ -1612,18 +1630,25 @@ impl Pallet { // - signed phase was complete or not started, in which case finalization is idempotent and // inexpensive (1 read of an empty vector). let _ = Self::finalize_signed_phase(); + >::take() .ok_or(ElectionError::::NothingQueued) .or_else(|_| { - T::Fallback::instant_elect(None, None) - .map_err(|fe| ElectionError::Fallback(fe)) - .and_then(|supports| { - Ok(ReadySolution { - supports, - score: Default::default(), - compute: ElectionCompute::Fallback, - }) + // default data provider bounds are unbounded. calling `instant_elect` with + // unbounded data provider bounds means that the on-chain `T:Bounds` configs will + // *not* be overwritten. + T::Fallback::instant_elect( + DataProviderBounds::default(), + DataProviderBounds::default(), + ) + .map_err(|fe| ElectionError::Fallback(fe)) + .and_then(|supports| { + Ok(ReadySolution { + supports, + score: Default::default(), + compute: ElectionCompute::Fallback, }) + }) }) .map(|ReadySolution { compute, score, supports }| { Self::deposit_event(Event::ElectionFinalized { compute, score }); @@ -1654,7 +1679,7 @@ impl Pallet { #[cfg(feature = "try-runtime")] impl Pallet { - fn do_try_state() -> Result<(), &'static str> { + fn do_try_state() -> Result<(), TryRuntimeError> { Self::try_state_snapshot()?; Self::try_state_signed_submissions_map()?; Self::try_state_phase_off() @@ -1663,7 +1688,7 @@ impl Pallet { // [`Snapshot`] state check. Invariants: // - [`DesiredTargets`] exists if and only if [`Snapshot`] is present. // - [`SnapshotMetadata`] exist if and only if [`Snapshot`] is present. 
- fn try_state_snapshot() -> Result<(), &'static str> { + fn try_state_snapshot() -> Result<(), TryRuntimeError> { if >::exists() && >::exists() && >::exists() @@ -1675,7 +1700,7 @@ impl Pallet { { Ok(()) } else { - Err("If snapshot exists, metadata and desired targets should be set too. Otherwise, none should be set.") + Err("If snapshot exists, metadata and desired targets should be set too. Otherwise, none should be set.".into()) } } @@ -1683,28 +1708,34 @@ impl Pallet { // - All [`SignedSubmissionIndices`] are present in [`SignedSubmissionsMap`], and no more; // - [`SignedSubmissionNextIndex`] is not present in [`SignedSubmissionsMap`]; // - [`SignedSubmissionIndices`] is sorted by election score. - fn try_state_signed_submissions_map() -> Result<(), &'static str> { + fn try_state_signed_submissions_map() -> Result<(), TryRuntimeError> { let mut last_score: ElectionScore = Default::default(); let indices = >::get(); for (i, indice) in indices.iter().enumerate() { let submission = >::get(indice.2); if submission.is_none() { - return Err("All signed submissions indices must be part of the submissions map") + return Err( + "All signed submissions indices must be part of the submissions map".into() + ) } if i == 0 { last_score = indice.0 } else { if last_score.strict_threshold_better(indice.0, Perbill::zero()) { - return Err("Signed submission indices vector must be ordered by election score") + return Err( + "Signed submission indices vector must be ordered by election score".into() + ) } last_score = indice.0; } } if >::iter().nth(indices.len()).is_some() { - return Err("Signed submissions map length should be the same as the indices vec length") + return Err( + "Signed submissions map length should be the same as the indices vec length".into() + ) } match >::get() { @@ -1712,7 +1743,8 @@ impl Pallet { next => if >::get(next).is_some() { return Err( - "The next submissions index should not be in the submissions maps already", + "The next submissions index should 
not be in the submissions maps already" + .into(), ) } else { Ok(()) @@ -1722,12 +1754,12 @@ impl Pallet { // [`Phase::Off`] state check. Invariants: // - If phase is `Phase::Off`, [`Snapshot`] must be none. - fn try_state_phase_off() -> Result<(), &'static str> { + fn try_state_phase_off() -> Result<(), TryRuntimeError> { match Self::current_phase().is_off() { false => Ok(()), true => if >::get().is_some() { - Err("Snapshot must be none when in Phase::Off") + Err("Snapshot must be none when in Phase::Off".into()) } else { Ok(()) }, @@ -1737,7 +1769,7 @@ impl Pallet { impl ElectionProviderBase for Pallet { type AccountId = T::AccountId; - type BlockNumber = T::BlockNumber; + type BlockNumber = BlockNumberFor; type Error = ElectionError; type MaxWinners = T::MaxWinners; type DataProvider = T::DataProvider; @@ -1986,8 +2018,8 @@ mod tests { use crate::{ mock::{ multi_phase_events, raw_solution, roll_to, roll_to_signed, roll_to_unsigned, AccountId, - ExtBuilder, MockWeightInfo, MockedWeightInfo, MultiPhase, Runtime, RuntimeOrigin, - SignedMaxSubmissions, System, TargetIndex, Targets, + ElectionsBounds, ExtBuilder, MockWeightInfo, MockedWeightInfo, MultiPhase, Runtime, + RuntimeOrigin, SignedMaxSubmissions, System, TargetIndex, Targets, Voters, }, Phase, }; @@ -2809,7 +2841,11 @@ mod tests { fn snapshot_too_big_failure_onchain_fallback() { // the `MockStaking` is designed such that if it has too many targets, it simply fails. ExtBuilder::default().build_and_execute(|| { - Targets::set((0..(TargetIndex::max_value() as AccountId) + 1).collect::>()); + // sets bounds on number of targets. + let new_bounds = ElectionBoundsBuilder::default().targets_count(1_000.into()).build(); + ElectionsBounds::set(new_bounds); + + Targets::set((0..(1_000 as AccountId) + 1).collect::>()); // Signed phase failed to open. roll_to(15); @@ -2844,9 +2880,11 @@ mod tests { fn snapshot_too_big_failure_no_fallback() { // and if the backup mode is nothing, we go into the emergency mode.. 
ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { - crate::mock::Targets::set( - (0..(TargetIndex::max_value() as AccountId) + 1).collect::>(), - ); + // sets bounds on number of targets. + let new_bounds = ElectionBoundsBuilder::default().targets_count(1_000.into()).build(); + ElectionsBounds::set(new_bounds); + + Targets::set((0..(TargetIndex::max_value() as AccountId) + 1).collect::>()); // Signed phase failed to open. roll_to(15); @@ -2876,9 +2914,10 @@ mod tests { // but if there are too many voters, we simply truncate them. ExtBuilder::default().build_and_execute(|| { // we have 8 voters in total. - assert_eq!(crate::mock::Voters::get().len(), 8); + assert_eq!(Voters::get().len(), 8); // but we want to take 2. - crate::mock::MaxElectingVoters::set(2); + let new_bounds = ElectionBoundsBuilder::default().voters_count(2.into()).build(); + ElectionsBounds::set(new_bounds); // Signed phase opens just fine. roll_to_signed(); diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 732a650ce6db1..82c7279879feb 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -18,9 +18,8 @@ use super::*; use crate::{self as multi_phase, unsigned::MinerConfig}; use frame_election_provider_support::{ - data_provider, - onchain::{self}, - ElectionDataProvider, NposSolution, SequentialPhragmen, + bounds::{DataProviderBounds, ElectionBounds}, + data_provider, onchain, ElectionDataProvider, NposSolution, SequentialPhragmen, }; pub use frame_support::{assert_noop, assert_ok, pallet_prelude::GetDefault}; use frame_support::{ @@ -45,7 +44,7 @@ use sp_npos_elections::{ use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, - PerU16, + BuildStorage, PerU16, }; use std::sync::Arc; @@ -54,12 +53,9 @@ pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( - pub struct Runtime where - 
Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { - System: frame_system::{Pallet, Call, Event, Config}, + System: frame_system::{Pallet, Call, Event, Config}, Balances: pallet_balances::{Pallet, Call, Event, Config}, MultiPhase: multi_phase::{Pallet, Call, Event}, } @@ -211,14 +207,13 @@ impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = BlockNumber; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type DbWeight = (); @@ -256,7 +251,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -304,7 +299,9 @@ parameter_types! { #[derive(Debug)] pub static MaxWinners: u32 = 200; - + // `ElectionBounds` and `OnChainElectionsBounds` are defined separately to set them independently in the tests. 
+ pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); + pub static OnChainElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); pub static EpochLength: u64 = 30; pub static OnChainFallback: bool = true; } @@ -316,14 +313,13 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = StakingMock; type WeightInfo = (); type MaxWinners = MaxWinners; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; + type Bounds = OnChainElectionsBounds; } pub struct MockFallback; impl ElectionProviderBase for MockFallback { + type BlockNumber = BlockNumber; type AccountId = AccountId; - type BlockNumber = u64; type Error = &'static str; type DataProvider = StakingMock; type MaxWinners = MaxWinners; @@ -331,12 +327,15 @@ impl ElectionProviderBase for MockFallback { impl InstantElectionProvider for MockFallback { fn instant_elect( - max_voters: Option, - max_targets: Option, + voters_bounds: DataProviderBounds, + targets_bounds: DataProviderBounds, ) -> Result, Self::Error> { if OnChainFallback::get() { - onchain::OnChainExecution::::instant_elect(max_voters, max_targets) - .map_err(|_| "onchain::OnChainExecution failed.") + onchain::OnChainExecution::::instant_elect( + voters_bounds, + targets_bounds, + ) + .map_err(|_| "onchain::OnChainExecution failed.") } else { Err("NoFallback.") } @@ -408,11 +407,10 @@ impl crate::Config for Runtime { type GovernanceFallback = frame_election_provider_support::onchain::OnChainExecution; type ForceOrigin = frame_system::EnsureRoot; - type MaxElectingVoters = MaxElectingVoters; - type MaxElectableTargets = MaxElectableTargets; type MaxWinners = MaxWinners; type MinerConfig = Self; type Solver = SequentialPhragmen, Balancing>; + type ElectionBounds = ElectionsBounds; } impl frame_system::offchain::SendTransactionTypes for Runtime @@ -436,15 +434,15 @@ pub struct ExtBuilder {} pub struct StakingMock; impl ElectionDataProvider for StakingMock { + 
type BlockNumber = BlockNumber; type AccountId = AccountId; - type BlockNumber = u64; type MaxVotesPerVoter = MaxNominations; - fn electable_targets(maybe_max_len: Option) -> data_provider::Result> { + fn electable_targets(bounds: DataProviderBounds) -> data_provider::Result> { let targets = Targets::get(); if !DataProviderAllowBadData::get() && - maybe_max_len.map_or(false, |max_len| targets.len() > max_len) + bounds.count.map_or(false, |max_len| targets.len() > max_len.0 as usize) { return Err("Targets too big") } @@ -452,13 +450,12 @@ impl ElectionDataProvider for StakingMock { Ok(targets) } - fn electing_voters( - maybe_max_len: Option, - ) -> data_provider::Result>> { + fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result>> { let mut voters = Voters::get(); + if !DataProviderAllowBadData::get() { - if let Some(max_len) = maybe_max_len { - voters.truncate(max_len) + if let Some(max_len) = bounds.count { + voters.truncate(max_len.0 as usize) } } @@ -505,12 +502,6 @@ impl ElectionDataProvider for StakingMock { let mut current = Targets::get(); current.push(target); Targets::set(current); - - // to be on-par with staking, we add a self vote as well. the stake is really not that - // important. 
- let mut current = Voters::get(); - current.push((target, ExistentialDeposit::get() as u64, bounded_vec![target])); - Voters::set(current); } } @@ -574,7 +565,7 @@ impl ExtBuilder { pub fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); let mut storage = - frame_system::GenesisConfig::default().build_storage::().unwrap(); + frame_system::GenesisConfig::::default().build_storage().unwrap(); let _ = pallet_balances::GenesisConfig:: { balances: vec![ diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index bde985518d53e..76068ba99d36c 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -27,6 +27,7 @@ use frame_election_provider_support::NposSolution; use frame_support::traits::{ defensive_prelude::*, Currency, Get, OnUnbalanced, ReservableCurrency, }; +use frame_system::pallet_prelude::BlockNumberFor; use sp_arithmetic::traits::SaturatedConversion; use sp_core::bounded::BoundedVec; use sp_npos_elections::ElectionScore; @@ -100,10 +101,8 @@ pub type SignedSubmissionOf = SignedSubmission< /// Always sorted vector of a score, submitted at the given block number, which can be found at the /// given index (`u32`) of the `SignedSubmissionsMap`. -pub type SubmissionIndicesOf = BoundedVec< - (ElectionScore, ::BlockNumber, u32), - ::SignedMaxSubmissions, ->; +pub type SubmissionIndicesOf = + BoundedVec<(ElectionScore, BlockNumberFor, u32), ::SignedMaxSubmissions>; /// Outcome of [`SignedSubmissions::insert`]. 
pub enum InsertResult { @@ -216,7 +215,7 @@ impl SignedSubmissions { fn swap_out_submission( &mut self, remove_pos: usize, - insert: Option<(ElectionScore, T::BlockNumber, u32)>, + insert: Option<(ElectionScore, BlockNumberFor, u32)>, ) -> Option> { if remove_pos >= self.indices.len() { return None @@ -537,7 +536,10 @@ impl Pallet { #[cfg(test)] mod tests { use super::*; - use crate::{mock::*, ElectionCompute, ElectionError, Error, Event, Perbill, Phase}; + use crate::{ + mock::*, ElectionBoundsBuilder, ElectionCompute, ElectionError, Error, Event, Perbill, + Phase, + }; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; #[test] @@ -566,13 +568,14 @@ mod tests { fn data_provider_should_respect_target_limits() { ExtBuilder::default().build_and_execute(|| { // given a reduced expectation of maximum electable targets - MaxElectableTargets::set(2); + let new_bounds = ElectionBoundsBuilder::default().targets_count(2.into()).build(); + ElectionsBounds::set(new_bounds); // and a data provider that does not respect limits DataProviderAllowBadData::set(true); assert_noop!( MultiPhase::create_snapshot(), - ElectionError::DataProvider("Snapshot too big for submission."), + ElectionError::DataProvider("Ensure targets bounds: bounds exceeded."), ); }) } @@ -581,13 +584,14 @@ mod tests { fn data_provider_should_respect_voter_limits() { ExtBuilder::default().build_and_execute(|| { // given a reduced expectation of maximum electing voters - MaxElectingVoters::set(2); + let new_bounds = ElectionBoundsBuilder::default().voters_count(2.into()).build(); + ElectionsBounds::set(new_bounds); // and a data provider that does not respect limits DataProviderAllowBadData::set(true); assert_noop!( MultiPhase::create_snapshot(), - ElectionError::DataProvider("Snapshot too big for submission."), + ElectionError::DataProvider("Ensure voters bounds: bounds exceeded."), ); }) } diff --git a/frame/election-provider-multi-phase/src/unsigned.rs 
b/frame/election-provider-multi-phase/src/unsigned.rs index 9c09cb48c7c05..e21e6c5e6d229 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -29,7 +29,7 @@ use frame_support::{ traits::{DefensiveResult, Get}, BoundedVec, }; -use frame_system::offchain::SubmitTransaction; +use frame_system::{offchain::SubmitTransaction, pallet_prelude::BlockNumberFor}; use scale_info::TypeInfo; use sp_npos_elections::{ assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized, ElectionResult, @@ -298,12 +298,12 @@ impl Pallet { /// /// Returns `Ok(())` if offchain worker limit is respected, `Err(reason)` otherwise. If `Ok()` /// is returned, `now` is written in storage and will be used in further calls as the baseline. - pub fn ensure_offchain_repeat_frequency(now: T::BlockNumber) -> Result<(), MinerError> { + pub fn ensure_offchain_repeat_frequency(now: BlockNumberFor) -> Result<(), MinerError> { let threshold = T::OffchainRepeat::get(); let last_block = StorageValueRef::persistent(OFFCHAIN_LAST_BLOCK); let mutate_stat = last_block.mutate::<_, &'static str, _>( - |maybe_head: Result, _>| { + |maybe_head: Result>, _>| { match maybe_head { Ok(Some(head)) if now < head => Err("fork."), Ok(Some(head)) if now >= head && now <= head + threshold => diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs index c7636ded484e2..3d7b944337ffb 100644 --- a/frame/election-provider-multi-phase/src/weights.rs +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_election_provider_multi_phase //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_election_provider_multi_phase +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_election_provider_multi_phase. pub trait WeightInfo { @@ -82,10 +86,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) fn on_initialize_nothing() -> Weight { // Proof Size summary in bytes: - // Measured: `994` + // Measured: `1028` // Estimated: `3481` - // Minimum execution time: 21_239_000 picoseconds. - Weight::from_parts(21_970_000, 3481) + // Minimum execution time: 22_089_000 picoseconds. + Weight::from_parts(22_677_000, 3481) .saturating_add(T::DbWeight::get().reads(8_u64)) } /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) @@ -94,10 +98,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) fn on_initialize_open_signed() -> Weight { // Proof Size summary in bytes: - // Measured: `114` - // Estimated: `1599` - // Minimum execution time: 13_913_000 picoseconds. - Weight::from_parts(14_329_000, 1599) + // Measured: `148` + // Estimated: `1633` + // Minimum execution time: 11_986_000 picoseconds. 
+ Weight::from_parts(12_445_000, 1633) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -107,10 +111,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) fn on_initialize_open_unsigned() -> Weight { // Proof Size summary in bytes: - // Measured: `114` - // Estimated: `1599` - // Minimum execution time: 15_377_000 picoseconds. - Weight::from_parts(15_714_000, 1599) + // Measured: `148` + // Estimated: `1633` + // Minimum execution time: 12_988_000 picoseconds. + Weight::from_parts(13_281_000, 1633) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -122,8 +126,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 32_899_000 picoseconds. - Weight::from_parts(33_455_000, 3593) + // Minimum execution time: 32_659_000 picoseconds. + Weight::from_parts(33_281_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -133,8 +137,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 22_532_000 picoseconds. - Weight::from_parts(23_039_000, 3593) + // Minimum execution time: 22_471_000 picoseconds. + Weight::from_parts(23_046_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -150,10 +154,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 253_511_000 picoseconds. - Weight::from_parts(261_190_000, 0) - // Standard Error: 1_621 - .saturating_add(Weight::from_parts(157_608, 0).saturating_mul(v.into())) + // Minimum execution time: 262_360_000 picoseconds. 
+ Weight::from_parts(279_313_000, 0) + // Standard Error: 2_384 + .saturating_add(Weight::from_parts(176_415, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) @@ -178,14 +182,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `d` is `[200, 400]`. fn elect_queued(a: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `337 + a * (768 ±0) + d * (48 ±0)` - // Estimated: `3889 + a * (768 ±0) + d * (49 ±0)` - // Minimum execution time: 284_994_000 picoseconds. - Weight::from_parts(97_696_734, 3889) - // Standard Error: 4_172 - .saturating_add(Weight::from_parts(331_569, 0).saturating_mul(a.into())) - // Standard Error: 6_254 - .saturating_add(Weight::from_parts(92_198, 0).saturating_mul(d.into())) + // Measured: `371 + a * (768 ±0) + d * (48 ±0)` + // Estimated: `3923 + a * (768 ±0) + d * (49 ±0)` + // Minimum execution time: 301_283_000 picoseconds. + Weight::from_parts(324_586_000, 3923) + // Standard Error: 4_763 + .saturating_add(Weight::from_parts(279_812, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) .saturating_add(Weight::from_parts(0, 768).saturating_mul(a.into())) @@ -205,10 +207,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionsMap (max_values: None, max_size: None, mode: Measured) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `893` - // Estimated: `2378` - // Minimum execution time: 52_194_000 picoseconds. - Weight::from_parts(53_062_000, 2378) + // Measured: `927` + // Estimated: `2412` + // Minimum execution time: 52_276_000 picoseconds. 
+ Weight::from_parts(53_846_000, 2412) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -232,14 +234,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `d` is `[200, 400]`. fn submit_unsigned(v: u32, t: u32, a: u32, _d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `219 + t * (32 ±0) + v * (553 ±0)` - // Estimated: `1704 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 4_843_067_000 picoseconds. - Weight::from_parts(4_860_833_000, 1704) - // Standard Error: 14_594 - .saturating_add(Weight::from_parts(76_611, 0).saturating_mul(v.into())) - // Standard Error: 43_249 - .saturating_add(Weight::from_parts(4_347_887, 0).saturating_mul(a.into())) + // Measured: `253 + t * (32 ±0) + v * (553 ±0)` + // Estimated: `1738 + t * (32 ±0) + v * (553 ±0)` + // Minimum execution time: 5_448_459_000 picoseconds. + Weight::from_parts(5_525_622_000, 1738) + // Standard Error: 21_478 + .saturating_add(Weight::from_parts(256_345, 0).saturating_mul(v.into())) + // Standard Error: 63_648 + .saturating_add(Weight::from_parts(5_103_224, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) @@ -280,14 +282,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `d` is `[200, 400]`. fn feasibility_check(v: u32, t: u32, a: u32, _d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `194 + t * (32 ±0) + v * (553 ±0)` - // Estimated: `1679 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 4_190_524_000 picoseconds. 
- Weight::from_parts(4_200_207_000, 1679) - // Standard Error: 12_454 - .saturating_add(Weight::from_parts(166_342, 0).saturating_mul(v.into())) - // Standard Error: 36_906 - .saturating_add(Weight::from_parts(3_493_372, 0).saturating_mul(a.into())) + // Measured: `228 + t * (32 ±0) + v * (553 ±0)` + // Estimated: `1713 + t * (32 ±0) + v * (553 ±0)` + // Minimum execution time: 4_724_399_000 picoseconds. + Weight::from_parts(4_886_472_000, 1713) + // Standard Error: 15_220 + .saturating_add(Weight::from_parts(365_569, 0).saturating_mul(v.into())) + // Standard Error: 45_104 + .saturating_add(Weight::from_parts(3_176_675, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) @@ -314,10 +316,10 @@ impl WeightInfo for () { /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) fn on_initialize_nothing() -> Weight { // Proof Size summary in bytes: - // Measured: `994` + // Measured: `1028` // Estimated: `3481` - // Minimum execution time: 21_239_000 picoseconds. - Weight::from_parts(21_970_000, 3481) + // Minimum execution time: 22_089_000 picoseconds. + Weight::from_parts(22_677_000, 3481) .saturating_add(RocksDbWeight::get().reads(8_u64)) } /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) @@ -326,10 +328,10 @@ impl WeightInfo for () { /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) fn on_initialize_open_signed() -> Weight { // Proof Size summary in bytes: - // Measured: `114` - // Estimated: `1599` - // Minimum execution time: 13_913_000 picoseconds. - Weight::from_parts(14_329_000, 1599) + // Measured: `148` + // Estimated: `1633` + // Minimum execution time: 11_986_000 picoseconds. 
+ Weight::from_parts(12_445_000, 1633) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -339,10 +341,10 @@ impl WeightInfo for () { /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) fn on_initialize_open_unsigned() -> Weight { // Proof Size summary in bytes: - // Measured: `114` - // Estimated: `1599` - // Minimum execution time: 15_377_000 picoseconds. - Weight::from_parts(15_714_000, 1599) + // Measured: `148` + // Estimated: `1633` + // Minimum execution time: 12_988_000 picoseconds. + Weight::from_parts(13_281_000, 1633) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -354,8 +356,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 32_899_000 picoseconds. - Weight::from_parts(33_455_000, 3593) + // Minimum execution time: 32_659_000 picoseconds. + Weight::from_parts(33_281_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -365,8 +367,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 22_532_000 picoseconds. - Weight::from_parts(23_039_000, 3593) + // Minimum execution time: 22_471_000 picoseconds. + Weight::from_parts(23_046_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -382,10 +384,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 253_511_000 picoseconds. - Weight::from_parts(261_190_000, 0) - // Standard Error: 1_621 - .saturating_add(Weight::from_parts(157_608, 0).saturating_mul(v.into())) + // Minimum execution time: 262_360_000 picoseconds. 
+ Weight::from_parts(279_313_000, 0) + // Standard Error: 2_384 + .saturating_add(Weight::from_parts(176_415, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) @@ -410,14 +412,12 @@ impl WeightInfo for () { /// The range of component `d` is `[200, 400]`. fn elect_queued(a: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `337 + a * (768 ±0) + d * (48 ±0)` - // Estimated: `3889 + a * (768 ±0) + d * (49 ±0)` - // Minimum execution time: 284_994_000 picoseconds. - Weight::from_parts(97_696_734, 3889) - // Standard Error: 4_172 - .saturating_add(Weight::from_parts(331_569, 0).saturating_mul(a.into())) - // Standard Error: 6_254 - .saturating_add(Weight::from_parts(92_198, 0).saturating_mul(d.into())) + // Measured: `371 + a * (768 ±0) + d * (48 ±0)` + // Estimated: `3923 + a * (768 ±0) + d * (49 ±0)` + // Minimum execution time: 301_283_000 picoseconds. + Weight::from_parts(324_586_000, 3923) + // Standard Error: 4_763 + .saturating_add(Weight::from_parts(279_812, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) .saturating_add(Weight::from_parts(0, 768).saturating_mul(a.into())) @@ -437,10 +437,10 @@ impl WeightInfo for () { /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionsMap (max_values: None, max_size: None, mode: Measured) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `893` - // Estimated: `2378` - // Minimum execution time: 52_194_000 picoseconds. - Weight::from_parts(53_062_000, 2378) + // Measured: `927` + // Estimated: `2412` + // Minimum execution time: 52_276_000 picoseconds. 
+ Weight::from_parts(53_846_000, 2412) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -464,14 +464,14 @@ impl WeightInfo for () { /// The range of component `d` is `[200, 400]`. fn submit_unsigned(v: u32, t: u32, a: u32, _d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `219 + t * (32 ±0) + v * (553 ±0)` - // Estimated: `1704 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 4_843_067_000 picoseconds. - Weight::from_parts(4_860_833_000, 1704) - // Standard Error: 14_594 - .saturating_add(Weight::from_parts(76_611, 0).saturating_mul(v.into())) - // Standard Error: 43_249 - .saturating_add(Weight::from_parts(4_347_887, 0).saturating_mul(a.into())) + // Measured: `253 + t * (32 ±0) + v * (553 ±0)` + // Estimated: `1738 + t * (32 ±0) + v * (553 ±0)` + // Minimum execution time: 5_448_459_000 picoseconds. + Weight::from_parts(5_525_622_000, 1738) + // Standard Error: 21_478 + .saturating_add(Weight::from_parts(256_345, 0).saturating_mul(v.into())) + // Standard Error: 63_648 + .saturating_add(Weight::from_parts(5_103_224, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) @@ -512,14 +512,14 @@ impl WeightInfo for () { /// The range of component `d` is `[200, 400]`. fn feasibility_check(v: u32, t: u32, a: u32, _d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `194 + t * (32 ±0) + v * (553 ±0)` - // Estimated: `1679 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 4_190_524_000 picoseconds. 
- Weight::from_parts(4_200_207_000, 1679) - // Standard Error: 12_454 - .saturating_add(Weight::from_parts(166_342, 0).saturating_mul(v.into())) - // Standard Error: 36_906 - .saturating_add(Weight::from_parts(3_493_372, 0).saturating_mul(a.into())) + // Measured: `228 + t * (32 ±0) + v * (553 ±0)` + // Estimated: `1713 + t * (32 ±0) + v * (553 ±0)` + // Minimum execution time: 4_724_399_000 picoseconds. + Weight::from_parts(4_886_472_000, 1713) + // Standard Error: 15_220 + .saturating_add(Weight::from_parts(365_569, 0).saturating_mul(v.into())) + // Standard Error: 45_104 + .saturating_add(Weight::from_parts(3_176_675, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) diff --git a/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml index 00dd8708e2280..01644887759c0 100644 --- a/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml +++ b/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml @@ -13,17 +13,18 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +parking_lot = "0.12.1" +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } scale-info = { version = "2.0.1", features = ["derive"] } log = { version = "0.4.17", default-features = false } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } -sp-io = { version = "7.0.0", path = "../../../primitives/io" } -sp-std = { version = "5.0.0", path = "../../../primitives/std" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +sp-io = { version = "23.0.0", path = "../../../primitives/io" } +sp-std = { version = "8.0.0", path = "../../../primitives/std" } sp-staking = { 
version = "4.0.0-dev", path = "../../../primitives/staking" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/npos-elections" } -sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } frame-system = { version = "4.0.0-dev", path = "../../system" } frame-support = { version = "4.0.0-dev", path = "../../support" } diff --git a/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index b1f59258b0eb8..5d7f9deda0791 100644 --- a/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -21,7 +21,7 @@ mod mock; pub(crate) const LOG_TARGET: &str = "tests::e2e-epm"; use frame_election_provider_support::ElectionProvider; -use frame_support::assert_ok; +use frame_support::{assert_err, assert_noop, assert_ok}; use mock::*; use pallet_election_provider_multi_phase::Phase; use sp_core::Get; @@ -55,12 +55,14 @@ fn log_current_time() { #[test] fn block_progression_works() { - ExtBuilder::default().build_and_execute(|| { + let (mut ext, pool_state, _) = ExtBuilder::default().build_offchainify(); + + ext.execute_with(|| { assert_eq!(active_era(), 0); assert_eq!(Session::current_index(), 0); assert!(ElectionProviderMultiPhase::current_phase().is_off()); - assert!(start_next_active_era().is_ok()); + assert!(start_next_active_era(pool_state.clone()).is_ok()); assert_eq!(active_era(), 1); assert_eq!(Session::current_index(), >::get()); @@ -70,12 +72,14 @@ fn block_progression_works() { assert!(ElectionProviderMultiPhase::current_phase().is_signed()); }); - ExtBuilder::default().build_and_execute(|| { + let (mut ext, pool_state, _) = 
ExtBuilder::default().build_offchainify(); + + ext.execute_with(|| { assert_eq!(active_era(), 0); assert_eq!(Session::current_index(), 0); assert!(ElectionProviderMultiPhase::current_phase().is_off()); - assert!(start_next_active_era_delayed_solution().is_ok()); + assert!(start_next_active_era_delayed_solution(pool_state).is_ok()); // if the solution is delayed, EPM will end up in emergency mode.. assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); // .. era won't progress.. @@ -85,6 +89,45 @@ fn block_progression_works() { }) } +#[test] +fn offchainify_works() { + use pallet_election_provider_multi_phase::QueuedSolution; + + let staking_builder = StakingExtBuilder::default(); + let epm_builder = EpmExtBuilder::default(); + let (mut ext, pool_state, _) = ExtBuilder::default() + .epm(epm_builder) + .staking(staking_builder) + .build_offchainify(); + + ext.execute_with(|| { + // test ocw progression and solution queue if submission when unsigned phase submission is + // not delayed. + for _ in 0..100 { + roll_one(pool_state.clone(), false); + let current_phase = ElectionProviderMultiPhase::current_phase(); + + assert!( + match QueuedSolution::::get() { + Some(_) => current_phase.is_unsigned(), + None => !current_phase.is_unsigned(), + }, + "solution must be queued *only* in unsigned phase" + ); + } + + // test ocw solution queue if submission in unsigned phase is delayed. + for _ in 0..100 { + roll_one(pool_state.clone(), true); + assert_eq!( + QueuedSolution::::get(), + None, + "solution must never be submitted and stored since it is delayed" + ); + } + }) +} + #[test] /// Replicates the Kusama incident of 8th Dec 2022 and its resolution through the governance /// fallback. @@ -101,8 +144,9 @@ fn block_progression_works() { /// restarts. Note that in this test case, the emergency throttling is disabled. 
fn enters_emergency_phase_after_forcing_before_elect() { let epm_builder = EpmExtBuilder::default().disable_emergency_throttling(); + let (mut ext, pool_state, _) = ExtBuilder::default().epm(epm_builder).build_offchainify(); - ExtBuilder::default().epm(epm_builder).build_and_execute(|| { + ext.execute_with(|| { log!( trace, "current validators (staking): {:?}", @@ -119,15 +163,15 @@ fn enters_emergency_phase_after_forcing_before_elect() { assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::ForceNew); - advance_session_delayed_solution(); + advance_session_delayed_solution(pool_state.clone()); assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); log_current_time(); let era_before_delayed_next = Staking::current_era(); // try to advance 2 eras. - assert!(start_next_active_era_delayed_solution().is_ok()); + assert!(start_next_active_era_delayed_solution(pool_state.clone()).is_ok()); assert_eq!(Staking::current_era(), era_before_delayed_next); - assert!(start_next_active_era().is_err()); + assert!(start_next_active_era(pool_state).is_err()); assert_eq!(Staking::current_era(), era_before_delayed_next); // EPM is still in emergency phase. @@ -171,41 +215,43 @@ fn continous_slashes_below_offending_threshold() { let staking_builder = StakingExtBuilder::default().validator_count(10); let epm_builder = EpmExtBuilder::default().disable_emergency_throttling(); - ExtBuilder::default() - .staking(staking_builder) + let (mut ext, pool_state, _) = ExtBuilder::default() .epm(epm_builder) - .build_and_execute(|| { - assert_eq!(Session::validators().len(), 10); - let mut active_validator_set = Session::validators(); - - roll_to_epm_signed(); + .staking(staking_builder) + .build_offchainify(); - // set a minimum election score. 
- assert!(set_minimum_election_score(500, 1000, 500).is_ok()); + ext.execute_with(|| { + assert_eq!(Session::validators().len(), 10); + let mut active_validator_set = Session::validators(); - // slash 10% of the active validators and progress era until the minimum trusted score - // is reached. - while active_validator_set.len() > 0 { - let slashed = slash_percentage(Perbill::from_percent(10)); - assert_eq!(slashed.len(), 1); + roll_to_epm_signed(); - // break loop when era does not progress; EPM is in emergency phase as election - // failed due to election minimum score. - if start_next_active_era().is_err() { - assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); - break - } + // set a minimum election score. + assert!(set_minimum_election_score(500, 1000, 500).is_ok()); - active_validator_set = Session::validators(); + // slash 10% of the active validators and progress era until the minimum trusted score + // is reached. + while active_validator_set.len() > 0 { + let slashed = slash_percentage(Perbill::from_percent(10)); + assert_eq!(slashed.len(), 1); - log!( - trace, - "slashed 10% of active validators ({:?}). After slash: {:?}", - slashed, - active_validator_set - ); + // break loop when era does not progress; EPM is in emergency phase as election + // failed due to election minimum score. + if start_next_active_era(pool_state.clone()).is_err() { + assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); + break } - }); + + active_validator_set = Session::validators(); + + log!( + trace, + "slashed 10% of active validators ({:?}). After slash: {:?}", + slashed, + active_validator_set + ); + } + }); } #[test] @@ -278,6 +324,8 @@ fn transition_to_signed_phase_from_unsigned() { }); } +/// Slashed validator sets intentions in the same era of slashing. +/// /// When validators are slashed, they are chilled and removed from the current `VoterList`. Thus, /// the slashed validator should not be considered in the next validator set. 
However, if the /// slashed validator sets its intention to validate again in the same era when it was slashed and @@ -295,53 +343,119 @@ fn set_validation_intention_after_chilled() { use frame_election_provider_support::SortedListProvider; use pallet_staking::{Event, Forcing, Nominators}; - let staking_builder = StakingExtBuilder::default(); - let epm_builder = EpmExtBuilder::default(); - ExtBuilder::default() - .staking(staking_builder) - .epm(epm_builder) - .build_and_execute(|| { - assert_eq!(active_era(), 0); - // validator is part of the validator set. - assert!(Session::validators().contains(&81)); - assert!(::VoterList::contains(&81)); - - // nominate validator 81. - assert_ok!(Staking::nominate(RuntimeOrigin::signed(21), vec![81])); - assert_eq!(Nominators::::get(21).unwrap().targets, vec![81]); - - // validator is slashed. it is removed from the `VoterList` through chilling but in the - // current era, the validator is still part of the active validator set. - add_slash(&81); - assert!(Session::validators().contains(&81)); - assert!(!::VoterList::contains(&81)); - assert_eq!( - staking_events(), - [ - Event::Chilled { stash: 81 }, - Event::ForceEra { mode: Forcing::ForceNew }, - Event::SlashReported { - validator: 81, - slash_era: 0, - fraction: Perbill::from_percent(10) - } - ], - ); + let (mut ext, pool_state, _) = ExtBuilder::default() + .epm(EpmExtBuilder::default()) + .staking(StakingExtBuilder::default()) + .build_offchainify(); + + ext.execute_with(|| { + assert_eq!(active_era(), 0); + // validator is part of the validator set. + assert!(Session::validators().contains(&41)); + assert!(::VoterList::contains(&41)); + + // nominate validator 81. + assert_ok!(Staking::nominate(RuntimeOrigin::signed(21), vec![41])); + assert_eq!(Nominators::::get(21).unwrap().targets, vec![41]); + + // validator is slashed. it is removed from the `VoterList` through chilling but in the + // current era, the validator is still part of the active validator set. 
+ add_slash(&41); + assert!(Session::validators().contains(&41)); + assert!(!::VoterList::contains(&41)); + assert_eq!( + staking_events(), + [ + Event::Chilled { stash: 41 }, + Event::ForceEra { mode: Forcing::ForceNew }, + Event::SlashReported { + validator: 41, + slash_era: 0, + fraction: Perbill::from_percent(10) + } + ], + ); + + // after the nominator is slashed and chilled, the nominations remain. + assert_eq!(Nominators::::get(21).unwrap().targets, vec![41]); + + // validator sets intention to stake again in the same era it was chilled. + assert_ok!(Staking::validate(RuntimeOrigin::signed(41), Default::default())); + + // progress era and check that the slashed validator is still part of the validator + // set. + assert!(start_next_active_era(pool_state).is_ok()); + assert_eq!(active_era(), 1); + assert!(Session::validators().contains(&41)); + assert!(::VoterList::contains(&41)); + + // nominations are still active as before the slash. + assert_eq!(Nominators::::get(21).unwrap().targets, vec![41]); + }) +} + +#[test] +/// Active ledger balance may fall below ED if account chills before unbounding. +/// +/// Unbonding call fails if the remaining ledger's stash balance falls below the existential +/// deposit. However, if the stash is chilled before unbonding, the ledger's active balance may +/// be below ED. In that case, only the stash (or root) can kill the ledger entry by calling +/// `withdraw_unbonded` after the bonding period has passed. +/// +/// Related to . +fn ledger_consistency_active_balance_below_ed() { + use pallet_staking::{Error, Event}; + + let (mut ext, pool_state, _) = + ExtBuilder::default().staking(StakingExtBuilder::default()).build_offchainify(); + + ext.execute_with(|| { + assert_eq!(Staking::ledger(&11).unwrap().active, 1000); + + // unbonding total of active stake fails because the active ledger balance would fall + // below the `MinNominatorBond`. 
+ assert_noop!( + Staking::unbond(RuntimeOrigin::signed(11), 1000), + Error::::InsufficientBond + ); + + // however, chilling works as expected. + assert_ok!(Staking::chill(RuntimeOrigin::signed(11))); + + // now unbonding the full active balance works, since remainer of the active balance is + // not enforced to be below `MinNominatorBond` if the stash has been chilled. + assert_ok!(Staking::unbond(RuntimeOrigin::signed(11), 1000)); + + // the active balance of the ledger entry is 0, while total balance is 1000 until + // `withdraw_unbonded` is called. + assert_eq!(Staking::ledger(&11).unwrap().active, 0); + assert_eq!(Staking::ledger(&11).unwrap().total, 1000); - // after the nominator is slashed and chilled, the nominations remain. - assert_eq!(Nominators::::get(21).unwrap().targets, vec![81]); + // trying to withdraw the unbonded balance won't work yet because not enough bonding + // eras have passed. + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); + assert_eq!(Staking::ledger(&11).unwrap().total, 1000); - // validator sets intention to stake again in the same era it was chilled. - assert_ok!(Staking::validate(RuntimeOrigin::signed(81), Default::default())); + // tries to reap stash after chilling, which fails since the stash total balance is + // above ED. + assert_err!( + Staking::reap_stash(RuntimeOrigin::signed(11), 21, 0), + Error::::FundedTarget, + ); - // progress era and check that the slashed validator is still part of the validator - // set. - assert!(start_next_active_era().is_ok()); - assert_eq!(active_era(), 1); - assert!(Session::validators().contains(&81)); - assert!(::VoterList::contains(&81)); + // check the events so far: 1x Chilled and 1x Unbounded + assert_eq!( + staking_events(), + [Event::Chilled { stash: 11 }, Event::Unbonded { stash: 11, amount: 1000 }] + ); - // nominations are still active as before the slash. 
- assert_eq!(Nominators::::get(21).unwrap().targets, vec![81]); - }) + // after advancing `BondingDuration` eras, the `withdraw_unbonded` will unlock the + // chunks and the ledger entry will be cleared, since the ledger active balance is 0. + advance_eras( + ::BondingDuration::get() as usize, + pool_state, + ); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); + assert_eq!(Staking::ledger(&11), None); + }); } diff --git a/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 490179e91ddda..9c3511ae35751 100644 --- a/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -17,19 +17,21 @@ #![allow(dead_code)] -use _feps::ExtendedBalance; use frame_support::{ - parameter_types, traits, - traits::{GenesisBuild, Hooks}, + assert_ok, dispatch::UnfilteredDispatchable, parameter_types, traits, traits::Hooks, weights::constants, }; use frame_system::EnsureRoot; -use sp_core::{ConstU32, Get, H256}; +use sp_core::{ConstU32, Get}; use sp_npos_elections::{ElectionScore, VoteWeight}; use sp_runtime::{ + offchain::{ + testing::{OffchainState, PoolState, TestOffchainExt, TestTransactionPoolExt}, + OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, + }, testing, - traits::{IdentityLookup, Zero}, - transaction_validity, PerU16, Perbill, + traits::Zero, + transaction_validity, BuildStorage, PerU16, Perbill, }; use sp_staking::{ offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, @@ -38,26 +40,30 @@ use sp_staking::{ use sp_std::prelude::*; use std::collections::BTreeMap; -use frame_election_provider_support::{onchain, ElectionDataProvider, SequentialPhragmen, Weight}; +use codec::Decode; +use frame_election_provider_support::{ + bounds::ElectionBoundsBuilder, onchain, ElectionDataProvider, ExtendedBalance, + SequentialPhragmen, Weight, +}; use pallet_election_provider_multi_phase::{ 
- unsigned::MinerConfig, ElectionCompute, QueuedSolution, SolutionAccuracyOf, + unsigned::MinerConfig, Call, ElectionCompute, QueuedSolution, SolutionAccuracyOf, }; use pallet_staking::StakerStatus; +use parking_lot::RwLock; +use std::sync::Arc; + +use frame_support::derive_impl; use crate::{log, log_current_time}; -pub const INIT_TIMESTAMP: u64 = 30_000; -pub const BLOCK_TIME: u64 = 1000; +pub const INIT_TIMESTAMP: BlockNumber = 30_000; +pub const BLOCK_TIME: BlockNumber = 1000; -type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlockU32; type Extrinsic = testing::TestXt; frame_support::construct_runtime!( - pub enum Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub enum Runtime { System: frame_system, ElectionProviderMultiPhase: pallet_election_provider_multi_phase, @@ -70,39 +76,26 @@ frame_support::construct_runtime!( } ); -pub(crate) type AccountId = u128; +pub(crate) type AccountId = u64; pub(crate) type AccountIndex = u32; -pub(crate) type BlockNumber = u64; +pub(crate) type BlockNumber = u32; pub(crate) type Balance = u64; -pub(crate) type VoterIndex = u32; +pub(crate) type VoterIndex = u16; pub(crate) type TargetIndex = u16; -pub(crate) type Moment = u64; +pub(crate) type Moment = u32; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = traits::Everything; - type BlockWeights = BlockWeights; - type BlockLength = (); - type DbWeight = (); + type Block = Block; + type BlockHashCount = ConstU32<10>; + type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = AccountIndex; - type BlockNumber = BlockNumber; type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = sp_runtime::traits::BlakeTwo256; - type AccountId = AccountId; - type 
Lookup = IdentityLookup; - type Header = sp_runtime::testing::Header; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = (); - type Version = (); type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); type OnSetCode = (); - type MaxConsumers = traits::ConstU32<16>; + + type AccountData = pallet_balances::AccountData; } const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); @@ -126,7 +119,7 @@ impl pallet_balances::Config for Runtime { type AccountStore = System; type MaxHolds = ConstU32<1>; type MaxFreezes = traits::ConstU32<1>; - type HoldIdentifier = (); + type RuntimeHoldReason = RuntimeHoldReason; type FreezeIdentifier = (); type WeightInfo = (); } @@ -134,13 +127,13 @@ impl pallet_balances::Config for Runtime { impl pallet_timestamp::Config for Runtime { type Moment = Moment; type OnTimestampSet = (); - type MinimumPeriod = traits::ConstU64<5>; + type MinimumPeriod = traits::ConstU32<5>; type WeightInfo = (); } parameter_types! { - pub static Period: BlockNumber = 30; - pub static Offset: BlockNumber = 0; + pub static Period: u32 = 30; + pub static Offset: u32 = 0; } sp_runtime::impl_opaque_keys! { @@ -181,16 +174,15 @@ parameter_types! { // we expect a minimum of 3 blocks in signed phase and unsigned phases before trying // enetering in emergency phase after the election failed. 
pub static MinBlocksBeforeEmergency: BlockNumber = 3; - pub static MaxElectingVoters: VoterIndex = 1000; - pub static MaxElectableTargets: TargetIndex = 1000; pub static MaxActiveValidators: u32 = 1000; pub static OffchainRepeat: u32 = 5; pub static MinerMaxLength: u32 = 256; pub static MinerMaxWeight: Weight = BlockWeights::get().max_block; pub static TransactionPriority: transaction_validity::TransactionPriority = 1; + #[derive(Debug)] pub static MaxWinners: u32 = 100; - pub static MaxVotesPerVoter: u32 = 16; - pub static MaxNominations: u32 = 16; + pub static ElectionBounds: frame_election_provider_support::bounds::ElectionBounds = ElectionBoundsBuilder::default() + .voters_count(1_000.into()).targets_count(1_000.into()).build(); } impl pallet_election_provider_multi_phase::Config for Runtime { @@ -219,9 +211,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type GovernanceFallback = onchain::OnChainExecution; type Solver = SequentialPhragmen, ()>; type ForceOrigin = EnsureRoot; - type MaxElectableTargets = MaxElectableTargets; - type MaxElectingVoters = MaxElectingVoters; type MaxWinners = MaxWinners; + type ElectionBounds = ElectionBounds; type BenchmarkingConfig = NoopElectionProviderBenchmarkConfig; type WeightInfo = (); } @@ -260,12 +251,14 @@ impl pallet_bags_list::Config for Runtime { type Score = VoteWeight; } +/// Upper limit on the number of NPOS nominations. 
+const MAX_QUOTA_NOMINATIONS: u32 = 16; + impl pallet_staking::Config for Runtime { - type MaxNominations = MaxNominations; type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; - type CurrencyToVote = traits::SaturatingCurrencyToVote; + type CurrencyToVote = (); type RewardRemainder = (); type RuntimeEvent = RuntimeEvent; type Slash = (); // burn slashes @@ -282,10 +275,11 @@ impl pallet_staking::Config for Runtime { type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = BagsList; + type NominationsQuota = pallet_staking::FixedNominationsQuota; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; type HistoryDepth = HistoryDepth; - type OnStakerSlash = (); + type EventListeners = (); type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; } @@ -314,8 +308,7 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = (); type MaxWinners = MaxWinners; - type VotersBound = VotersBound; - type TargetsBound = TargetsBound; + type Bounds = ElectionBounds; } pub struct NoopElectionProviderBenchmarkConfig; @@ -487,17 +480,18 @@ impl Default for ExtBuilder { } impl ExtBuilder { - pub fn build(self) -> sp_io::TestExternalities { + pub fn build(&self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); let mut storage = - frame_system::GenesisConfig::default().build_storage::().unwrap(); + frame_system::GenesisConfig::::default().build_storage().unwrap(); - let _ = - pallet_balances::GenesisConfig:: { balances: self.balances_builder.balances } - .assimilate_storage(&mut storage); + let _ = pallet_balances::GenesisConfig:: { + balances: self.balances_builder.balances.clone(), + } + .assimilate_storage(&mut storage); let mut stakers = self.staking_builder.stakers.clone(); - 
self.staking_builder.status.into_iter().for_each(|(stash, status)| { + self.staking_builder.status.clone().into_iter().for_each(|(stash, status)| { let (_, _, _, ref mut prev_status) = stakers .iter_mut() .find(|s| s.0 == stash) @@ -505,7 +499,7 @@ impl ExtBuilder { *prev_status = status; }); // replaced any of the stakes if needed. - self.staking_builder.stakes.into_iter().for_each(|(stash, stake)| { + self.staking_builder.stakes.clone().into_iter().for_each(|(stash, stake)| { let (_, _, ref mut prev_stake, _) = stakers .iter_mut() .find(|s| s.0 == stash) @@ -540,12 +534,13 @@ impl ExtBuilder { ext.execute_with(|| { System::set_block_number(1); Session::on_initialize(1); - >::on_initialize(1); + >::on_initialize(1); Timestamp::set_timestamp(INIT_TIMESTAMP); }); ext } + pub fn staking(mut self, builder: StakingExtBuilder) -> Self { self.staking_builder = builder; self @@ -561,8 +556,33 @@ impl ExtBuilder { self } + pub fn build_offchainify( + self, + ) -> (sp_io::TestExternalities, Arc>, Arc>) { + // add offchain and pool externality extensions. 
+ let mut ext = self.build(); + let (offchain, offchain_state) = TestOffchainExt::new(); + let (pool, pool_state) = TestTransactionPoolExt::new(); + + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + + (ext, pool_state, offchain_state) + } + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { - self.build().execute_with(test) + let mut ext = self.build(); + ext.execute_with(test); + + #[cfg(feature = "try-runtime")] + ext.execute_with(|| { + let bn = System::block_number(); + + assert_ok!(>::try_state(bn)); + assert_ok!(>::try_state(bn)); + assert_ok!(>::try_state(bn)); + }); } } @@ -594,18 +614,67 @@ pub fn roll_to(n: BlockNumber, delay_solution: bool) { } } +// Progress to given block, triggering session and era changes as we progress and ensuring that +// there is a solution queued when expected. +pub fn roll_to_with_ocw(n: BlockNumber, pool: Arc>, delay_solution: bool) { + for b in (System::block_number()) + 1..=n { + System::set_block_number(b); + Session::on_initialize(b); + Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP); + + ElectionProviderMultiPhase::on_initialize(b); + ElectionProviderMultiPhase::offchain_worker(b); + + if !delay_solution && pool.read().transactions.len() > 0 { + // decode submit_unsigned callable that may be queued in the pool by ocw. skip all + // other extrinsics in the pool. + for encoded in &pool.read().transactions { + let extrinsic = Extrinsic::decode(&mut &encoded[..]).unwrap(); + + let _ = match extrinsic.call { + RuntimeCall::ElectionProviderMultiPhase( + call @ Call::submit_unsigned { .. }, + ) => { + // call submit_unsigned callable in OCW pool. 
+ crate::assert_ok!(call.dispatch_bypass_filter(RuntimeOrigin::none())); + }, + _ => (), + }; + } + + pool.try_write().unwrap().transactions.clear(); + } + + Staking::on_initialize(b); + if b != n { + Staking::on_finalize(System::block_number()); + } + + log_current_time(); + } +} +// helper to progress one block ahead. +pub fn roll_one(pool: Arc>, delay_solution: bool) { + let bn = System::block_number().saturating_add(1); + roll_to_with_ocw(bn, pool, delay_solution); +} + /// Progresses from the current block number (whatever that may be) to the block where the session /// `session_index` starts. -pub(crate) fn start_session(session_index: SessionIndex, delay_solution: bool) { - let end: u64 = if Offset::get().is_zero() { - Period::get() * (session_index as u64) +pub(crate) fn start_session( + session_index: SessionIndex, + pool: Arc>, + delay_solution: bool, +) { + let end = if Offset::get().is_zero() { + Period::get() * session_index } else { - Offset::get() * (session_index as u64) + Period::get() * (session_index as u64) + Offset::get() * session_index + Period::get() * session_index }; assert!(end >= System::block_number()); - roll_to(end, delay_solution); + roll_to_with_ocw(end, pool, delay_solution); // session must have progressed properly. assert_eq!( @@ -618,29 +687,41 @@ pub(crate) fn start_session(session_index: SessionIndex, delay_solution: bool) { } /// Go one session forward. 
-pub(crate) fn advance_session() { +pub(crate) fn advance_session(pool: Arc>) { let current_index = Session::current_index(); - start_session(current_index + 1, false); + start_session(current_index + 1, pool, false); } -pub(crate) fn advance_session_delayed_solution() { +pub(crate) fn advance_session_delayed_solution(pool: Arc>) { let current_index = Session::current_index(); - start_session(current_index + 1, true); + start_session(current_index + 1, pool, true); } -pub(crate) fn start_next_active_era() -> Result<(), ()> { - start_active_era(active_era() + 1, false) +pub(crate) fn start_next_active_era(pool: Arc>) -> Result<(), ()> { + start_active_era(active_era() + 1, pool, false) } -pub(crate) fn start_next_active_era_delayed_solution() -> Result<(), ()> { - start_active_era(active_era() + 1, true) +pub(crate) fn start_next_active_era_delayed_solution( + pool: Arc>, +) -> Result<(), ()> { + start_active_era(active_era() + 1, pool, true) +} + +pub(crate) fn advance_eras(n: usize, pool: Arc>) { + for _ in 0..n { + assert_ok!(start_next_active_era(pool.clone())); + } } /// Progress until the given era. -pub(crate) fn start_active_era(era_index: EraIndex, delay_solution: bool) -> Result<(), ()> { +pub(crate) fn start_active_era( + era_index: EraIndex, + pool: Arc>, + delay_solution: bool, +) -> Result<(), ()> { let era_before = current_era(); - start_session((era_index * >::get()).into(), delay_solution); + start_session((era_index * >::get()).into(), pool, delay_solution); log!( info, @@ -753,7 +834,7 @@ pub(crate) fn slash_through_offending_threshold() { // Slashes a percentage of the active nominators that haven't been slashed yet, with // a minimum of 1 validator slash. 
-pub(crate) fn slash_percentage(percentage: Perbill) -> Vec { +pub(crate) fn slash_percentage(percentage: Perbill) -> Vec { let validators = Session::validators(); let mut remaining_slashes = (percentage * validators.len() as u32).max(1); let mut slashed = vec![]; @@ -789,3 +870,17 @@ pub(crate) fn staking_events() -> Vec> { .filter_map(|e| if let RuntimeEvent::Staking(inner) = e { Some(inner) } else { None }) .collect::>() } + +pub(crate) fn epm_events() -> Vec> { + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let RuntimeEvent::ElectionProviderMultiPhase(inner) = e { + Some(inner) + } else { + None + } + }) + .collect::>() +} diff --git a/frame/election-provider-support/Cargo.toml b/frame/election-provider-support/Cargo.toml index a519d964c60dc..2aeb06b0442de 100644 --- a/frame/election-provider-support/Cargo.toml +++ b/frame/election-provider-support/Cargo.toml @@ -12,20 +12,20 @@ description = "election provider supporting traits" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { version = "4.0.0-dev", path = "solution-type" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../../primitives/arithmetic" } sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../primitives/npos-elections" } -sp-runtime = { version = "7.0.0", default-features = false, path = 
"../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } [dev-dependencies] rand = { version = "0.8.5", features = ["small_rng"] } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } sp-npos-elections = { version = "4.0.0-dev", path = "../../primitives/npos-elections" } [features] @@ -41,6 +41,15 @@ std = [ "sp-core/std", "sp-runtime/std", "sp-std/std", + "sp-io/std" +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" ] -runtime-benchmarks = [] -try-runtime = [] diff --git a/frame/election-provider-support/benchmarking/Cargo.toml b/frame/election-provider-support/benchmarking/Cargo.toml index bef371ec5efbf..2614c61d4352d 100644 --- a/frame/election-provider-support/benchmarking/Cargo.toml +++ b/frame/election-provider-support/benchmarking/Cargo.toml @@ -12,14 +12,14 @@ description = "Benchmarking for election provider support onchain config trait" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../../benchmarking" } frame-election-provider-support = { version = 
"4.0.0-dev", default-features = false, path = ".." } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/npos-elections" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } [features] default = ["std"] @@ -34,4 +34,6 @@ std = [ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-election-provider-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" ] diff --git a/frame/election-provider-support/solution-type/Cargo.toml b/frame/election-provider-support/solution-type/Cargo.toml index 95ad6f226663b..ef98cb8c1f38f 100644 --- a/frame/election-provider-support/solution-type/Cargo.toml +++ b/frame/election-provider-support/solution-type/Cargo.toml @@ -15,15 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.14", features = ["full", "visit"] } -quote = "1.0.26" +syn = { version = "2.0.16", features = ["full", "visit"] } +quote = "1.0.28" proc-macro2 = "1.0.56" proc-macro-crate = "1.1.3" [dev-dependencies] -parity-scale-codec = "3.2.2" +parity-scale-codec = "3.6.1" scale-info = "2.1.1" -sp-arithmetic = { version = "6.0.0", path = "../../../primitives/arithmetic" } +sp-arithmetic = { version = "16.0.0", path = "../../../primitives/arithmetic" } # used by generate_solution_type: frame-election-provider-support = { version = "4.0.0-dev", path = ".." 
} frame-support = { version = "4.0.0-dev", path = "../../support" } diff --git a/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index 3060b861e9023..91b544d162198 100644 --- a/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -17,12 +17,12 @@ clap = { version = "4.2.5", features = ["derive"] } honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { version = "4.0.0-dev", path = ".." } frame-election-provider-support = { version = "4.0.0-dev", path = "../.." } -sp-arithmetic = { version = "6.0.0", path = "../../../../primitives/arithmetic" } -sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" } +sp-arithmetic = { version = "16.0.0", path = "../../../../primitives/arithmetic" } +sp-runtime = { version = "24.0.0", path = "../../../../primitives/runtime" } # used by generate_solution_type: sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/npos-elections" } frame-support = { version = "4.0.0-dev", path = "../../../support" } diff --git a/frame/election-provider-support/solution-type/src/codec.rs b/frame/election-provider-support/solution-type/src/codec.rs index 3e91fc1ea92f6..17a256c228e28 100644 --- a/frame/election-provider-support/solution-type/src/codec.rs +++ b/frame/election-provider-support/solution-type/src/codec.rs @@ -51,14 +51,14 @@ fn decode_impl( quote! 
{ let #name = < - _feps::sp_std::prelude::Vec<(_feps::codec::Compact<#voter_type>, _feps::codec::Compact<#target_type>)> + _fepsp::sp_std::prelude::Vec<(_fepsp::codec::Compact<#voter_type>, _fepsp::codec::Compact<#target_type>)> as - _feps::codec::Decode + _fepsp::codec::Decode >::decode(value)?; let #name = #name .into_iter() .map(|(v, t)| (v.0, t.0)) - .collect::<_feps::sp_std::prelude::Vec<_>>(); + .collect::<_fepsp::sp_std::prelude::Vec<_>>(); } }; @@ -73,12 +73,12 @@ fn decode_impl( quote! { let #name = < - _feps::sp_std::prelude::Vec<( - _feps::codec::Compact<#voter_type>, - [(_feps::codec::Compact<#target_type>, _feps::codec::Compact<#weight_type>); #c-1], - _feps::codec::Compact<#target_type>, + _fepsp::sp_std::prelude::Vec<( + _fepsp::codec::Compact<#voter_type>, + [(_fepsp::codec::Compact<#target_type>, _fepsp::codec::Compact<#weight_type>); #c-1], + _fepsp::codec::Compact<#target_type>, )> - as _feps::codec::Decode + as _fepsp::codec::Decode >::decode(value)?; let #name = #name .into_iter() @@ -87,7 +87,7 @@ fn decode_impl( [ #inner_impl ], t_last.0, )) - .collect::<_feps::sp_std::prelude::Vec<_>>(); + .collect::<_fepsp::sp_std::prelude::Vec<_>>(); } }) .collect::(); @@ -100,8 +100,8 @@ fn decode_impl( .collect::(); quote!( - impl _feps::codec::Decode for #ident { - fn decode(value: &mut I) -> Result { + impl _fepsp::codec::Decode for #ident { + fn decode(value: &mut I) -> Result { #decode_impl_single #decode_impl_rest @@ -123,10 +123,10 @@ fn encode_impl(ident: &syn::Ident, count: usize) -> TokenStream2 { let #name = self.#name .iter() .map(|(v, t)| ( - _feps::codec::Compact(v.clone()), - _feps::codec::Compact(t.clone()), + _fepsp::codec::Compact(v.clone()), + _fepsp::codec::Compact(t.clone()), )) - .collect::<_feps::sp_std::prelude::Vec<_>>(); + .collect::<_fepsp::sp_std::prelude::Vec<_>>(); #name.encode_to(&mut r); } }; @@ -139,8 +139,8 @@ fn encode_impl(ident: &syn::Ident, count: usize) -> TokenStream2 { let inners_solution_array = (0..c - 1) 
.map(|i| { quote! {( - _feps::codec::Compact(inner[#i].0.clone()), - _feps::codec::Compact(inner[#i].1.clone()), + _fepsp::codec::Compact(inner[#i].0.clone()), + _fepsp::codec::Compact(inner[#i].1.clone()), ),} }) .collect::(); @@ -149,19 +149,19 @@ fn encode_impl(ident: &syn::Ident, count: usize) -> TokenStream2 { let #name = self.#name .iter() .map(|(v, inner, t_last)| ( - _feps::codec::Compact(v.clone()), + _fepsp::codec::Compact(v.clone()), [ #inners_solution_array ], - _feps::codec::Compact(t_last.clone()), + _fepsp::codec::Compact(t_last.clone()), )) - .collect::<_feps::sp_std::prelude::Vec<_>>(); + .collect::<_fepsp::sp_std::prelude::Vec<_>>(); #name.encode_to(&mut r); } }) .collect::(); quote!( - impl _feps::codec::Encode for #ident { - fn encode(&self) -> _feps::sp_std::prelude::Vec { + impl _fepsp::codec::Encode for #ident { + fn encode(&self) -> _fepsp::sp_std::prelude::Vec { let mut r = vec![]; #encode_impl_single #encode_impl_rest @@ -182,8 +182,8 @@ fn scale_info_impl( let name = format!("{}", vote_field(1)); quote! { .field(|f| - f.ty::<_feps::sp_std::prelude::Vec< - (_feps::codec::Compact<#voter_type>, _feps::codec::Compact<#target_type>) + f.ty::<_fepsp::sp_std::prelude::Vec< + (_fepsp::codec::Compact<#voter_type>, _fepsp::codec::Compact<#target_type>) >>() .name(#name) ) @@ -194,10 +194,10 @@ fn scale_info_impl( let name = format!("{}", vote_field(2)); quote! { .field(|f| - f.ty::<_feps::sp_std::prelude::Vec<( - _feps::codec::Compact<#voter_type>, - (_feps::codec::Compact<#target_type>, _feps::codec::Compact<#weight_type>), - _feps::codec::Compact<#target_type> + f.ty::<_fepsp::sp_std::prelude::Vec<( + _fepsp::codec::Compact<#voter_type>, + (_fepsp::codec::Compact<#target_type>, _fepsp::codec::Compact<#weight_type>), + _fepsp::codec::Compact<#target_type> )>>() .name(#name) ) @@ -209,13 +209,13 @@ fn scale_info_impl( let name = format!("{}", vote_field(c)); quote! 
{ .field(|f| - f.ty::<_feps::sp_std::prelude::Vec<( - _feps::codec::Compact<#voter_type>, + f.ty::<_fepsp::sp_std::prelude::Vec<( + _fepsp::codec::Compact<#voter_type>, [ - (_feps::codec::Compact<#target_type>, _feps::codec::Compact<#weight_type>); + (_fepsp::codec::Compact<#target_type>, _fepsp::codec::Compact<#weight_type>); #c - 1 ], - _feps::codec::Compact<#target_type> + _fepsp::codec::Compact<#target_type> )>>() .name(#name) ) @@ -224,14 +224,14 @@ fn scale_info_impl( .collect::(); quote!( - impl _feps::scale_info::TypeInfo for #ident { + impl _fepsp::scale_info::TypeInfo for #ident { type Identity = Self; - fn type_info() -> _feps::scale_info::Type<_feps::scale_info::form::MetaForm> { - _feps::scale_info::Type::builder() - .path(_feps::scale_info::Path::new(stringify!(#ident), module_path!())) + fn type_info() -> _fepsp::scale_info::Type<_fepsp::scale_info::form::MetaForm> { + _fepsp::scale_info::Type::builder() + .path(_fepsp::scale_info::Path::new(stringify!(#ident), module_path!())) .composite( - _feps::scale_info::build::Fields::named() + _fepsp::scale_info::build::Fields::named() #scale_info_impl_single #scale_info_impl_double #scale_info_impl_rest diff --git a/frame/election-provider-support/solution-type/src/lib.rs b/frame/election-provider-support/solution-type/src/lib.rs index 6938953071a7c..80773f6fb4768 100644 --- a/frame/election-provider-support/solution-type/src/lib.rs +++ b/frame/election-provider-support/solution-type/src/lib.rs @@ -252,10 +252,16 @@ where fn imports() -> Result { match crate_name("frame-election-provider-support") { - Ok(FoundCrate::Itself) => Ok(quote! { use crate as _feps; }), + Ok(FoundCrate::Itself) => Ok(quote! 
{ + use crate as _feps; + use _feps::private as _fepsp; + }), Ok(FoundCrate::Name(frame_election_provider_support)) => { let ident = syn::Ident::new(&frame_election_provider_support, Span::call_site()); - Ok(quote!( extern crate #ident as _feps; )) + Ok(quote!( + use #ident as _feps; + use _feps::private as _fepsp; + )) }, Err(e) => Err(syn::Error::new(Span::call_site(), e)), } diff --git a/frame/election-provider-support/solution-type/src/single_page.rs b/frame/election-provider-support/solution-type/src/single_page.rs index 688fee70a323b..161631ee83fa6 100644 --- a/frame/election-provider-support/solution-type/src/single_page.rs +++ b/frame/election-provider-support/solution-type/src/single_page.rs @@ -40,7 +40,7 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { let name = vote_field(1); // NOTE: we use the visibility of the struct for the fields as well.. could be made better. quote!( - #vis #name: _feps::sp_std::prelude::Vec<(#voter_type, #target_type)>, + #vis #name: _fepsp::sp_std::prelude::Vec<(#voter_type, #target_type)>, ) }; @@ -49,7 +49,7 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { let field_name = vote_field(c); let array_len = c - 1; quote!( - #vis #field_name: _feps::sp_std::prelude::Vec<( + #vis #field_name: _fepsp::sp_std::prelude::Vec<( #voter_type, [(#target_type, #weight_type); #array_len], #target_type @@ -84,9 +84,9 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { Eq, Clone, Debug, - _feps::codec::Encode, - _feps::codec::Decode, - _feps::scale_info::TypeInfo, + _fepsp::codec::Encode, + _fepsp::codec::Decode, + _fepsp::scale_info::TypeInfo, )]) }; @@ -102,7 +102,7 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { #derives_and_maybe_compact_encoding #vis struct #ident { #single #rest } - use _feps::__OrInvalidIndex; + use _fepsp::__OrInvalidIndex; impl _feps::NposSolution for #ident { const LIMIT: usize = #count; type VoterIndex = #voter_type; @@ -147,8 +147,8 @@ pub(crate) fn 
generate(def: crate::SolutionDef) -> Result { self, voter_at: impl Fn(Self::VoterIndex) -> Option, target_at: impl Fn(Self::TargetIndex) -> Option, - ) -> Result<_feps::sp_std::prelude::Vec<_feps::Assignment>, _feps::Error> { - let mut #assignment_name: _feps::sp_std::prelude::Vec<_feps::Assignment> = Default::default(); + ) -> Result<_fepsp::sp_std::prelude::Vec<_feps::Assignment>, _feps::Error> { + let mut #assignment_name: _fepsp::sp_std::prelude::Vec<_feps::Assignment> = Default::default(); #into_impl Ok(#assignment_name) } @@ -165,10 +165,10 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { all_edges } - fn unique_targets(&self) -> _feps::sp_std::prelude::Vec { + fn unique_targets(&self) -> _fepsp::sp_std::prelude::Vec { // NOTE: this implementation returns the targets sorted, but we don't use it yet per // se, nor is the API enforcing it. - use _feps::sp_std::collections::btree_set::BTreeSet; + use _fepsp::sp_std::collections::btree_set::BTreeSet; let mut all_targets: BTreeSet = BTreeSet::new(); let mut maybe_insert_target = |t: Self::TargetIndex| { all_targets.insert(t); @@ -185,10 +185,10 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { <#ident as _feps::NposSolution>::TargetIndex, <#ident as _feps::NposSolution>::Accuracy, >; - impl _feps::codec::MaxEncodedLen for #ident { + impl _fepsp::codec::MaxEncodedLen for #ident { fn max_encoded_len() -> usize { use frame_support::traits::Get; - use _feps::codec::Encode; + use _fepsp::codec::Encode; let s: u32 = #max_voters::get(); let max_element_size = // the first voter.. 
@@ -202,11 +202,11 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { // The assumption is that it contains #count-1 empty elements // and then last element with full size #count - .saturating_mul(_feps::codec::Compact(0u32).encoded_size()) + .saturating_mul(_fepsp::codec::Compact(0u32).encoded_size()) .saturating_add((s as usize).saturating_mul(max_element_size)) } } - impl<'a> _feps::sp_std::convert::TryFrom<&'a [__IndexAssignment]> for #ident { + impl<'a> _fepsp::sp_std::convert::TryFrom<&'a [__IndexAssignment]> for #ident { type Error = _feps::Error; fn try_from(index_assignments: &'a [__IndexAssignment]) -> Result { let mut #struct_name = #ident::default(); @@ -357,18 +357,18 @@ pub(crate) fn into_impl( let mut inners_parsed = inners .iter() .map(|(ref t_idx, p)| { - sum = _feps::sp_arithmetic::traits::Saturating::saturating_add(sum, *p); + sum = _fepsp::sp_arithmetic::traits::Saturating::saturating_add(sum, *p); let target = target_at(*t_idx).or_invalid_index()?; Ok((target, *p)) }) - .collect::, _feps::Error>>()?; + .collect::, _feps::Error>>()?; if sum >= #per_thing::one() { return Err(_feps::Error::SolutionWeightOverflow); } // defensive only. Since Percent doesn't have `Sub`. - let p_last = _feps::sp_arithmetic::traits::Saturating::saturating_sub( + let p_last = _fepsp::sp_arithmetic::traits::Saturating::saturating_sub( #per_thing::one(), sum, ); diff --git a/frame/election-provider-support/src/bounds.rs b/frame/election-provider-support/src/bounds.rs new file mode 100644 index 0000000000000..b9ae21e49ca70 --- /dev/null +++ b/frame/election-provider-support/src/bounds.rs @@ -0,0 +1,460 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types and helpers to define and handle election bounds. +//! +//! ### Overview +//! +//! This module defines and implements types that help create and handle election bounds. +//! [`DataProviderBounds`] encapsulates the upper limits for the results provided by `DataProvider` +//! implementors. Those limits can be defined over two axes: number of elements returned (`count`) +//! and/or the size of the returned SCALE encoded structure (`size`). +//! +//! [`ElectionBoundsBuilder`] is a helper to construct data election bounds and it aims at +//! preventing the caller from mistaking the order of size and count limits. +//! +//! ### Examples +//! +//! [`ElectionBoundsBuilder`] helps define the size and count bounds for both voters and targets. +//! +//! ``` +//! use frame_election_provider_support::bounds::*; +//! +//! // unbounded limits are never exhausted. +//! let unbounded = ElectionBoundsBuilder::default().build(); +//! assert!(!unbounded.targets.exhausted(SizeBound(1_000_000_000).into(), None)); +//! +//! let bounds = ElectionBoundsBuilder::default() +//! .voters_count(100.into()) +//! .voters_size(1_000.into()) +//! .targets_count(200.into()) +//! .targets_size(2_000.into()) +//! .build(); +//! +//! assert!(!bounds.targets.exhausted(SizeBound(1).into(), CountBound(1).into())); +//! assert!(bounds.targets.exhausted(SizeBound(1).into(), CountBound(100_000).into())); +//! ``` +//! +//! ### Implementation details +//! +//! A default or `None` bound means that no bounds are enforced (i.e. unlimited result size). In +//! 
general, be careful when using unbounded election bounds in production. + +use core::ops::Add; +use sp_runtime::traits::Zero; + +/// Count type for data provider bounds. +/// +/// Encapsulates the counting of things that can be bounded in an election, such as voters, +/// targets or anything else. +/// +/// This struct is defined mostly to prevent callers from mistakenly using `CountBound` instead of +/// `SizeBound` and vice-versa. +#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] +pub struct CountBound(pub u32); + +impl From<u32> for CountBound { + fn from(value: u32) -> Self { + CountBound(value) + } +} + +impl Add for CountBound { + type Output = Self; + fn add(self, rhs: Self) -> Self::Output { + CountBound(self.0.saturating_add(rhs.0)) + } +} + +impl Zero for CountBound { + fn is_zero(&self) -> bool { + self.0 == 0u32 + } + fn zero() -> Self { + CountBound(0) + } +} + +/// Size type for data provider bounds. +/// +/// Encapsulates the size limit of things that can be bounded in an election, such as voters, +/// targets or anything else. The size unit can represent anything depending on the election +/// logic and implementation, but it most likely will represent bytes in SCALE encoding in this +/// context. +/// +/// This struct is defined mostly to prevent callers from mistakenly using `CountBound` instead of +/// `SizeBound` and vice-versa. +#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] +pub struct SizeBound(pub u32); + +impl From<u32> for SizeBound { + fn from(value: u32) -> Self { + SizeBound(value) + } +} + +impl Zero for SizeBound { + fn is_zero(&self) -> bool { + self.0 == 0u32 + } + fn zero() -> Self { + SizeBound(0) + } +} + +impl Add for SizeBound { + type Output = Self; + fn add(self, rhs: Self) -> Self::Output { + SizeBound(self.0.saturating_add(rhs.0)) + } +} + +/// Data bounds for election data. 
+/// +/// Limits the data returned by `DataProvider` implementors, defined over two axes: `count`, +/// defining the maximum number of elements returned, and `size`, defining the limit in size +/// (bytes) of the SCALE encoded result. +/// +/// `None` represents unlimited bounds in both `count` and `size` axes. +#[derive(Clone, Copy, Default, Debug, Eq, PartialEq)] +pub struct DataProviderBounds { + pub count: Option<CountBound>, + pub size: Option<SizeBound>, +} + +impl DataProviderBounds { + /// Returns true if `given_count` exhausts `self.count`. + pub fn count_exhausted(self, given_count: CountBound) -> bool { + self.count.map_or(false, |count| given_count > count) + } + + /// Returns true if `given_size` exhausts `self.size`. + pub fn size_exhausted(self, given_size: SizeBound) -> bool { + self.size.map_or(false, |size| given_size > size) + } + + /// Returns true if `given_size` or `given_count` exhausts `self.size` or `self.count`, + /// respectively. + pub fn exhausted(self, given_size: Option<SizeBound>, given_count: Option<CountBound>) -> bool { + self.count_exhausted(given_count.unwrap_or(CountBound::zero())) || + self.size_exhausted(given_size.unwrap_or(SizeBound::zero())) + } + + /// Returns an instance of `Self` that is constructed by capping both the `count` and `size` + /// fields. If `self` is None, overwrite it with the provided bounds. + pub fn max(self, bounds: DataProviderBounds) -> Self { + DataProviderBounds { + count: self + .count + .map(|c| { + c.clamp(CountBound::zero(), bounds.count.unwrap_or(CountBound(u32::MAX))).into() + }) + .or(bounds.count), + size: self + .size + .map(|c| { + c.clamp(SizeBound::zero(), bounds.size.unwrap_or(SizeBound(u32::MAX))).into() + }) + .or(bounds.size), + } + } +} + +/// The voter and target bounds of an election. +/// +/// The bounds are defined over two axes: `count` of elements of the election (voters or targets) and +/// the `size` of the SCALE encoded result snapshot. 
+#[derive(Clone, Debug, Copy)] +pub struct ElectionBounds { + pub voters: DataProviderBounds, + pub targets: DataProviderBounds, +} + +impl ElectionBounds { + /// Returns an error if the provided `count` and `size` do not fit in the voter's election + /// bounds. + pub fn ensure_voters_limits( + self, + count: CountBound, + size: SizeBound, + ) -> Result<(), &'static str> { + match self.voters.exhausted(Some(size), Some(count)) { + true => Err("Ensure voters bounds: bounds exceeded."), + false => Ok(()), + } + } + + /// Returns an error if the provided `count` and `size` do not fit in the target's election + /// bounds. + pub fn ensure_targets_limits( + self, + count: CountBound, + size: SizeBound, + ) -> Result<(), &'static str> { + match self.targets.exhausted(Some(size), Some(count).into()) { + true => Err("Ensure targets bounds: bounds exceeded."), + false => Ok(()), + } + } +} + +/// Utility builder for [`ElectionBounds`]. +#[derive(Copy, Clone, Default)] +pub struct ElectionBoundsBuilder { + voters: Option, + targets: Option, +} + +impl From for ElectionBoundsBuilder { + fn from(bounds: ElectionBounds) -> Self { + ElectionBoundsBuilder { voters: Some(bounds.voters), targets: Some(bounds.targets) } + } +} + +impl ElectionBoundsBuilder { + /// Sets the voters count bounds. + pub fn voters_count(mut self, count: CountBound) -> Self { + self.voters = self.voters.map_or( + Some(DataProviderBounds { count: Some(count), size: None }), + |mut bounds| { + bounds.count = Some(count); + Some(bounds) + }, + ); + self + } + + /// Sets the voters size bounds. + pub fn voters_size(mut self, size: SizeBound) -> Self { + self.voters = self.voters.map_or( + Some(DataProviderBounds { count: None, size: Some(size) }), + |mut bounds| { + bounds.size = Some(size); + Some(bounds) + }, + ); + self + } + + /// Sets the targets count bounds. 
+ pub fn targets_count(mut self, count: CountBound) -> Self { + self.targets = self.targets.map_or( + Some(DataProviderBounds { count: Some(count), size: None }), + |mut bounds| { + bounds.count = Some(count); + Some(bounds) + }, + ); + self + } + + /// Sets the targets size bounds. + pub fn targets_size(mut self, size: SizeBound) -> Self { + self.targets = self.targets.map_or( + Some(DataProviderBounds { count: None, size: Some(size) }), + |mut bounds| { + bounds.size = Some(size); + Some(bounds) + }, + ); + self + } + + /// Set the voters bounds. + pub fn voters(mut self, bounds: Option) -> Self { + self.voters = bounds; + self + } + + /// Set the targets bounds. + pub fn targets(mut self, bounds: Option) -> Self { + self.targets = bounds; + self + } + + /// Caps the number of the voters bounds in self to `voters` bounds. If `voters` bounds are + /// higher than the self bounds, keeps it. Note that `None` bounds are equivalent to maximum + /// and should be treated as such. + pub fn voters_or_lower(mut self, voters: DataProviderBounds) -> Self { + self.voters = match self.voters { + None => Some(voters), + Some(v) => Some(v.max(voters)), + }; + self + } + + /// Caps the number of the target bounds in self to `voters` bounds. If `voters` bounds are + /// higher than the self bounds, keeps it. Note that `None` bounds are equivalent to maximum + /// and should be treated as such. + pub fn targets_or_lower(mut self, targets: DataProviderBounds) -> Self { + self.targets = match self.targets { + None => Some(targets), + Some(t) => Some(t.max(targets)), + }; + self + } + + /// Returns an instance of `ElectionBounds` from the current state. 
+ pub fn build(self) -> ElectionBounds { + ElectionBounds { + voters: self.voters.unwrap_or_default(), + targets: self.targets.unwrap_or_default(), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + use frame_support::{assert_err, assert_ok}; + + #[test] + fn data_provider_bounds_unbounded_works() { + let bounds = DataProviderBounds::default(); + assert!(!bounds.exhausted(None, None)); + assert!(!bounds.exhausted(SizeBound(u32::MAX).into(), CountBound(u32::MAX).into())); + } + + #[test] + fn election_bounds_builder_and_exhausted_bounds_work() { + // voter bounds exhausts if count > 100 or size > 1_000; target bounds exhausts if count > + // 200 or size > 2_000. + let bounds = ElectionBoundsBuilder::default() + .voters_count(100.into()) + .voters_size(1_000.into()) + .targets_count(200.into()) + .targets_size(2_000.into()) + .build(); + + assert!(!bounds.voters.exhausted(None, None)); + assert!(!bounds.voters.exhausted(SizeBound(10).into(), CountBound(10).into())); + assert!(!bounds.voters.exhausted(None, CountBound(100).into())); + assert!(!bounds.voters.exhausted(SizeBound(1_000).into(), None)); + // exhausts bounds. + assert!(bounds.voters.exhausted(None, CountBound(101).into())); + assert!(bounds.voters.exhausted(SizeBound(1_001).into(), None)); + + assert!(!bounds.targets.exhausted(None, None)); + assert!(!bounds.targets.exhausted(SizeBound(20).into(), CountBound(20).into())); + assert!(!bounds.targets.exhausted(None, CountBound(200).into())); + assert!(!bounds.targets.exhausted(SizeBound(2_000).into(), None)); + // exhausts bounds. 
+ assert!(bounds.targets.exhausted(None, CountBound(201).into())); + assert!(bounds.targets.exhausted(SizeBound(2_001).into(), None)); + } + + #[test] + fn election_bounds_ensure_limits_works() { + let bounds = ElectionBounds { + voters: DataProviderBounds { count: Some(CountBound(10)), size: Some(SizeBound(10)) }, + targets: DataProviderBounds { count: Some(CountBound(10)), size: Some(SizeBound(10)) }, + }; + + assert_ok!(bounds.ensure_voters_limits(CountBound(1), SizeBound(1))); + assert_ok!(bounds.ensure_voters_limits(CountBound(1), SizeBound(1))); + assert_ok!(bounds.ensure_voters_limits(CountBound(10), SizeBound(10))); + assert_err!( + bounds.ensure_voters_limits(CountBound(1), SizeBound(11)), + "Ensure voters bounds: bounds exceeded." + ); + assert_err!( + bounds.ensure_voters_limits(CountBound(11), SizeBound(10)), + "Ensure voters bounds: bounds exceeded." + ); + + assert_ok!(bounds.ensure_targets_limits(CountBound(1), SizeBound(1))); + assert_ok!(bounds.ensure_targets_limits(CountBound(1), SizeBound(1))); + assert_ok!(bounds.ensure_targets_limits(CountBound(10), SizeBound(10))); + assert_err!( + bounds.ensure_targets_limits(CountBound(1), SizeBound(11)), + "Ensure targets bounds: bounds exceeded." + ); + assert_err!( + bounds.ensure_targets_limits(CountBound(11), SizeBound(10)), + "Ensure targets bounds: bounds exceeded." + ); + } + + #[test] + fn data_provider_max_unbounded_works() { + let unbounded = DataProviderBounds::default(); + + // max of some bounds with unbounded data provider bounds will always return the defined + // bounds. 
+ let bounds = DataProviderBounds { count: CountBound(5).into(), size: SizeBound(10).into() }; + assert_eq!(unbounded.max(bounds), bounds); + + let bounds = DataProviderBounds { count: None, size: SizeBound(10).into() }; + assert_eq!(unbounded.max(bounds), bounds); + + let bounds = DataProviderBounds { count: CountBound(5).into(), size: None }; + assert_eq!(unbounded.max(bounds), bounds); + } + + #[test] + fn data_provider_max_bounded_works() { + let bounds_one = + DataProviderBounds { count: CountBound(10).into(), size: SizeBound(100).into() }; + let bounds_two = + DataProviderBounds { count: CountBound(100).into(), size: SizeBound(10).into() }; + let max_bounds_expected = + DataProviderBounds { count: CountBound(10).into(), size: SizeBound(10).into() }; + + assert_eq!(bounds_one.max(bounds_two), max_bounds_expected); + assert_eq!(bounds_two.max(bounds_one), max_bounds_expected); + } + + #[test] + fn election_bounds_clamp_works() { + let bounds = ElectionBoundsBuilder::default() + .voters_count(10.into()) + .voters_size(10.into()) + .voters_or_lower(DataProviderBounds { + count: CountBound(5).into(), + size: SizeBound(20).into(), + }) + .targets_count(20.into()) + .targets_or_lower(DataProviderBounds { + count: CountBound(30).into(), + size: SizeBound(30).into(), + }) + .build(); + + assert_eq!(bounds.voters.count.unwrap(), CountBound(5)); + assert_eq!(bounds.voters.size.unwrap(), SizeBound(10)); + assert_eq!(bounds.targets.count.unwrap(), CountBound(20)); + assert_eq!(bounds.targets.size.unwrap(), SizeBound(30)); + + // note that unbounded bounds (None) are equivalent to maximum value. 
+ let bounds = ElectionBoundsBuilder::default() + .voters_or_lower(DataProviderBounds { + count: CountBound(5).into(), + size: SizeBound(20).into(), + }) + .targets_or_lower(DataProviderBounds { + count: CountBound(10).into(), + size: SizeBound(10).into(), + }) + .build(); + + assert_eq!(bounds.voters.count.unwrap(), CountBound(5)); + assert_eq!(bounds.voters.size.unwrap(), SizeBound(20)); + assert_eq!(bounds.targets.count.unwrap(), CountBound(10)); + assert_eq!(bounds.targets.size.unwrap(), SizeBound(10)); + } +} diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index ee0e41a90cd60..577ac9c0b65a9 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -109,12 +109,12 @@ //! fn desired_targets() -> data_provider::Result { //! Ok(1) //! } -//! fn electing_voters(maybe_max_len: Option) +//! fn electing_voters(bounds: DataProviderBounds) //! -> data_provider::Result>> //! { //! Ok(Default::default()) //! } -//! fn electable_targets(maybe_max_len: Option) -> data_provider::Result> { +//! fn electable_targets(bounds: DataProviderBounds) -> data_provider::Result> { //! Ok(vec![10, 20, 30]) //! } //! fn next_election_prediction(now: BlockNumber) -> BlockNumber { @@ -145,7 +145,7 @@ //! impl ElectionProvider for GenericElectionProvider { //! fn ongoing() -> bool { false } //! fn elect() -> Result, Self::Error> { -//! Self::DataProvider::electable_targets(None) +//! Self::DataProvider::electable_targets(DataProviderBounds::default()) //! .map_err(|_| "failed to elect") //! .map(|t| bounded_vec![(t[0], Support::default())]) //! } @@ -173,11 +173,15 @@ #![cfg_attr(not(feature = "std"), no_std)] +pub mod bounds; pub mod onchain; pub mod traits; + use sp_runtime::traits::{Bounded, Saturating, Zero}; use sp_std::{fmt::Debug, prelude::*}; +pub use bounds::DataProviderBounds; +pub use codec::{Decode, Encode}; /// Re-export the solution generation macro. 
pub use frame_election_provider_solution_type::generate_solution_type; pub use frame_support::{traits::Get, weights::Weight, BoundedVec, RuntimeDebug}; @@ -189,15 +193,32 @@ pub use sp_npos_elections::{ }; pub use traits::NposSolution; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + // re-export for the solution macro, with the dependencies of the macro. #[doc(hidden)] -pub use codec; -#[doc(hidden)] -pub use scale_info; -#[doc(hidden)] -pub use sp_arithmetic; -#[doc(hidden)] -pub use sp_std; +pub mod private { + pub use codec; + pub use scale_info; + pub use sp_arithmetic; + pub use sp_std; + + // Simple Extension trait to easily convert `None` from index closures to `Err`. + // + // This is only generated and re-exported for the solution code to use. + pub trait __OrInvalidIndex { + fn or_invalid_index(self) -> Result; + } + + impl __OrInvalidIndex for Option { + fn or_invalid_index(self) -> Result { + self.ok_or(crate::Error::SolutionInvalidIndex) + } + } +} + +use private::__OrInvalidIndex; pub mod weights; pub use weights::WeightInfo; @@ -206,19 +227,6 @@ pub use weights::WeightInfo; mod mock; #[cfg(test)] mod tests; -// Simple Extension trait to easily convert `None` from index closures to `Err`. -// -// This is only generated and re-exported for the solution code to use. -#[doc(hidden)] -pub trait __OrInvalidIndex { - fn or_invalid_index(self) -> Result; -} - -impl __OrInvalidIndex for Option { - fn or_invalid_index(self) -> Result { - self.ok_or(Error::SolutionInvalidIndex) - } -} /// The [`IndexAssignment`] type is an intermediate between the assignments list /// ([`&[Assignment]`][Assignment]) and `SolutionOf`. @@ -227,7 +235,7 @@ impl __OrInvalidIndex for Option { /// making it fast to repeatedly encode into a `SolutionOf`. This property turns out /// to be important when trimming for solution length. 
#[derive(RuntimeDebug, Clone, Default)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, codec::Encode, codec::Decode))] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] pub struct IndexAssignment { /// Index of the voter among the voters list. pub who: VoterIndex, @@ -271,7 +279,7 @@ pub mod data_provider { /// Something that can provide the data to an [`ElectionProvider`]. pub trait ElectionDataProvider { /// The account identifier type. - type AccountId; + type AccountId: Encode; /// The block number type. type BlockNumber; @@ -282,25 +290,18 @@ pub trait ElectionDataProvider { /// All possible targets for the election, i.e. the targets that could become elected, thus /// "electable". /// - /// If `maybe_max_len` is `Some(v)` then the resulting vector MUST NOT be longer than `v` items - /// long. - /// /// This should be implemented as a self-weighing function. The implementor should register its /// appropriate weight at the end of execution with the system pallet directly. - fn electable_targets( - maybe_max_len: Option, - ) -> data_provider::Result>; + fn electable_targets(bounds: DataProviderBounds) + -> data_provider::Result>; /// All the voters that participate in the election, thus "electing". /// /// Note that if a notion of self-vote exists, it should be represented here. /// - /// If `maybe_max_len` is `Some(v)` then the resulting vector MUST NOT be longer than `v` items - /// long. - /// /// This should be implemented as a self-weighing function. The implementor should register its /// appropriate weight at the end of execution with the system pallet directly. - fn electing_voters(maybe_max_len: Option) -> data_provider::Result>>; + fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result>>; /// The number of targets to elect. /// @@ -421,8 +422,8 @@ pub trait ElectionProvider: ElectionProviderBase { /// data provider at runtime via `forced_input_voters_bound` and `forced_input_target_bound`. 
pub trait InstantElectionProvider: ElectionProviderBase { fn instant_elect( - forced_input_voters_bound: Option, - forced_input_target_bound: Option, + forced_input_voters_bound: DataProviderBounds, + forced_input_target_bound: DataProviderBounds, ) -> Result, Self::Error>; } @@ -464,8 +465,8 @@ where MaxWinners: Get, { fn instant_elect( - _: Option, - _: Option, + _: DataProviderBounds, + _: DataProviderBounds, ) -> Result, Self::Error> { Err("`NoElection` cannot do anything.") } @@ -564,7 +565,7 @@ pub trait SortedListProvider { /// Check internal state of the list. Only meant for debugging. #[cfg(feature = "try-runtime")] - fn try_state() -> Result<(), &'static str>; + fn try_state() -> Result<(), TryRuntimeError>; /// If `who` changes by the returned amount they are guaranteed to have a worst case change /// in their list position. diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index a312562d4944c..21ee710ea304d 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -20,6 +20,7 @@ //! careful when using it onchain. use crate::{ + bounds::{DataProviderBounds, ElectionBounds, ElectionBoundsBuilder}, BoundedSupportsOf, Debug, ElectionDataProvider, ElectionProvider, ElectionProviderBase, InstantElectionProvider, NposSolver, WeightInfo, }; @@ -52,8 +53,7 @@ impl From for Error { /// This implements both `ElectionProvider` and `InstantElectionProvider`. /// /// This type has some utilities to make it safe. Nonetheless, it should be used with utmost care. A -/// thoughtful value must be set as [`Config::VotersBound`] and [`Config::TargetsBound`] to ensure -/// the size of the input is sensible. +/// thoughtful value must be set as [`Config::Bounds`] to ensure the size of the input is sensible. 
pub struct OnChainExecution(PhantomData); #[deprecated(note = "use OnChainExecution, which is bounded by default")] @@ -73,7 +73,7 @@ pub trait Config { /// Something that provides the data for election. type DataProvider: ElectionDataProvider< AccountId = ::AccountId, - BlockNumber = ::BlockNumber, + BlockNumber = frame_system::pallet_prelude::BlockNumberFor, >; /// Weight information for extrinsics in this pallet. @@ -85,13 +85,9 @@ pub trait Config { /// always be more than `DataProvider::desired_target`. type MaxWinners: Get; - /// Bounds the number of voters, when calling into [`Config::DataProvider`]. It might be - /// overwritten in the `InstantElectionProvider` impl. - type VotersBound: Get; - - /// Bounds the number of targets, when calling into [`Config::DataProvider`]. It might be - /// overwritten in the `InstantElectionProvider` impl. - type TargetsBound: Get; + /// Elections bounds, to use when calling into [`Config::DataProvider`]. It might be overwritten + /// in the `InstantElectionProvider` impl. + type Bounds: Get; } /// Same as `BoundedSupportsOf` but for `onchain::Config`. 
@@ -101,12 +97,12 @@ pub type OnChainBoundedSupportsOf = BoundedSupports< >; fn elect_with_input_bounds( - maybe_max_voters: Option, - maybe_max_targets: Option, + bounds: ElectionBounds, ) -> Result, Error> { - let voters = T::DataProvider::electing_voters(maybe_max_voters).map_err(Error::DataProvider)?; - let targets = - T::DataProvider::electable_targets(maybe_max_targets).map_err(Error::DataProvider)?; + let (voters, targets) = T::DataProvider::electing_voters(bounds.voters) + .and_then(|voters| Ok((voters, T::DataProvider::electable_targets(bounds.targets)?))) + .map_err(Error::DataProvider)?; + let desired_targets = T::DataProvider::desired_targets().map_err(Error::DataProvider)?; if desired_targets > T::MaxWinners::get() { @@ -151,7 +147,7 @@ fn elect_with_input_bounds( impl ElectionProviderBase for OnChainExecution { type AccountId = ::AccountId; - type BlockNumber = ::BlockNumber; + type BlockNumber = frame_system::pallet_prelude::BlockNumberFor; type Error = Error; type MaxWinners = T::MaxWinners; type DataProvider = T::DataProvider; @@ -159,13 +155,15 @@ impl ElectionProviderBase for OnChainExecution { impl InstantElectionProvider for OnChainExecution { fn instant_elect( - forced_input_voters_bound: Option, - forced_input_target_bound: Option, + forced_input_voters_bounds: DataProviderBounds, + forced_input_targets_bounds: DataProviderBounds, ) -> Result, Self::Error> { - elect_with_input_bounds::( - Some(T::VotersBound::get().min(forced_input_voters_bound.unwrap_or(u32::MAX)) as usize), - Some(T::TargetsBound::get().min(forced_input_target_bound.unwrap_or(u32::MAX)) as usize), - ) + let elections_bounds = ElectionBoundsBuilder::from(T::Bounds::get()) + .voters_or_lower(forced_input_voters_bounds) + .targets_or_lower(forced_input_targets_bounds) + .build(); + + elect_with_input_bounds::(elections_bounds) } } @@ -175,10 +173,8 @@ impl ElectionProvider for OnChainExecution { } fn elect() -> Result, Self::Error> { - elect_with_input_bounds::( - 
Some(T::VotersBound::get() as usize), - Some(T::TargetsBound::get() as usize), - ) + let election_bounds = ElectionBoundsBuilder::from(T::Bounds::get()).build(); + elect_with_input_bounds::(election_bounds) } } @@ -186,10 +182,11 @@ impl ElectionProvider for OnChainExecution { mod tests { use super::*; use crate::{ElectionProvider, PhragMMS, SequentialPhragmen}; - use frame_support::{assert_noop, parameter_types, traits::ConstU32}; + use frame_support::{assert_noop, parameter_types}; use sp_npos_elections::Support; use sp_runtime::Perbill; type AccountId = u64; + type Nonce = u64; type BlockNumber = u64; pub type Header = sp_runtime::generic::Header; @@ -197,10 +194,7 @@ mod tests { pub type Block = sp_runtime::generic::Block; frame_support::construct_runtime!( - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { System: frame_system::{Pallet, Call, Event}, } @@ -210,14 +204,13 @@ mod tests { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = AccountId; - type BlockNumber = BlockNumber; + type Nonce = Nonce; type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = sp_runtime::testing::Header; + type Block = Block; type RuntimeEvent = (); type BlockHashCount = (); type DbWeight = (); @@ -239,6 +232,7 @@ mod tests { parameter_types! 
{ pub static MaxWinners: u32 = 10; pub static DesiredTargets: u32 = 2; + pub static Bounds: ElectionBounds = ElectionBoundsBuilder::default().voters_count(600.into()).targets_count(400.into()).build(); } impl Config for PhragmenParams { @@ -247,8 +241,7 @@ mod tests { type DataProvider = mock_data_provider::DataProvider; type WeightInfo = (); type MaxWinners = MaxWinners; - type VotersBound = ConstU32<600>; - type TargetsBound = ConstU32<400>; + type Bounds = Bounds; } impl Config for PhragMMSParams { @@ -257,8 +250,7 @@ mod tests { type DataProvider = mock_data_provider::DataProvider; type WeightInfo = (); type MaxWinners = MaxWinners; - type VotersBound = ConstU32<600>; - type TargetsBound = ConstU32<400>; + type Bounds = Bounds; } mod mock_data_provider { @@ -272,7 +264,7 @@ mod tests { type AccountId = AccountId; type BlockNumber = BlockNumber; type MaxVotesPerVoter = ConstU32<2>; - fn electing_voters(_: Option) -> data_provider::Result>> { + fn electing_voters(_: DataProviderBounds) -> data_provider::Result>> { Ok(vec![ (1, 10, bounded_vec![10, 20]), (2, 20, bounded_vec![30, 20]), @@ -280,7 +272,7 @@ mod tests { ]) } - fn electable_targets(_: Option) -> data_provider::Result> { + fn electable_targets(_: DataProviderBounds) -> data_provider::Result> { Ok(vec![10, 20, 30]) } diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index ce39e42b8eeb5..ac4f2411dd280 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } log = { version = "0.4.14", default-features = false } @@ -21,15 +21,16 @@ scale-info = { version = "2.0.0", default-features = false, features = ["derive" frame-benchmarking = { 
version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../primitives/npos-elections" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-staking = { default-features = false, path = "../../primitives/staking" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } sp-tracing = { path = "../../primitives/tracing" } substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } @@ -47,10 +48,21 @@ std = [ "sp-npos-elections/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "sp-staking/std", + "sp-tracing/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + 
"frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 1d7c79fe3ca97..b23ddda4e8d1c 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -101,9 +101,9 @@ use codec::{Decode, Encode}; use frame_support::{ traits::{ - defensive_prelude::*, ChangeMembers, Contains, ContainsLengthBound, Currency, - CurrencyToVote, Get, InitializeMembers, LockIdentifier, LockableCurrency, OnUnbalanced, - ReservableCurrency, SortedMembers, WithdrawReasons, + defensive_prelude::*, ChangeMembers, Contains, ContainsLengthBound, Currency, Get, + InitializeMembers, LockIdentifier, LockableCurrency, OnUnbalanced, ReservableCurrency, + SortedMembers, WithdrawReasons, }, weights::Weight, }; @@ -113,8 +113,12 @@ use sp_runtime::{ traits::{Saturating, StaticLookup, Zero}, DispatchError, Perbill, RuntimeDebug, }; +use sp_staking::currency_to_vote::CurrencyToVote; use sp_std::{cmp::Ordering, prelude::*}; +#[cfg(any(feature = "try-runtime", test))] +use sp_runtime::TryRuntimeError; + mod benchmarking; pub mod weights; pub use weights::WeightInfo; @@ -189,7 +193,7 @@ pub mod pallet { #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] #[pallet::without_storage_info] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::config] pub trait Config: frame_system::Config { @@ -200,7 +204,7 @@ pub mod pallet { type PalletId: Get; /// The currency that people are electing with. - type Currency: LockableCurrency + type Currency: LockableCurrency> + ReservableCurrency; /// What to do when the members change. @@ -246,7 +250,7 @@ pub mod pallet { /// round will happen. If set to zero, no elections are ever triggered and the module will /// be in passive mode. 
#[pallet::constant] - type TermDuration: Get; + type TermDuration: Get>; /// The maximum number of candidates in a phragmen election. /// @@ -282,7 +286,7 @@ pub mod pallet { /// What to do at the end of each block. /// /// Checks if an election needs to happen or not. - fn on_initialize(n: T::BlockNumber) -> Weight { + fn on_initialize(n: BlockNumberFor) -> Weight { let term_duration = T::TermDuration::get(); if !term_duration.is_zero() && (n % term_duration).is_zero() { Self::do_phragmen() @@ -327,7 +331,7 @@ pub mod pallet { } #[cfg(feature = "try-runtime")] - fn try_state(_n: T::BlockNumber) -> Result<(), &'static str> { + fn try_state(_n: BlockNumberFor) -> Result<(), TryRuntimeError> { Self::do_try_state() } } @@ -717,7 +721,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { assert!( self.members.len() as u32 <= T::DesiredMembers::get(), @@ -740,7 +744,10 @@ pub mod pallet { Members::::mutate(|members| { match members.binary_search_by(|m| m.who.cmp(member)) { Ok(_) => { - panic!("Duplicate member in elections-phragmen genesis: {}", member) + panic!( + "Duplicate member in elections-phragmen genesis: {:?}", + member + ) }, Err(pos) => members.insert( pos, @@ -1193,7 +1200,7 @@ impl ContainsLengthBound for Pallet { #[cfg(any(feature = "try-runtime", test))] impl Pallet { - fn do_try_state() -> Result<(), &'static str> { + fn do_try_state() -> Result<(), TryRuntimeError> { Self::try_state_members()?; Self::try_state_runners_up()?; Self::try_state_candidates()?; @@ -1204,20 +1211,20 @@ impl Pallet { /// [`Members`] state checks. Invariants: /// - Members are always sorted based on account ID. 
- fn try_state_members() -> Result<(), &'static str> { + fn try_state_members() -> Result<(), TryRuntimeError> { let mut members = Members::::get().clone(); members.sort_by_key(|m| m.who.clone()); if Members::::get() == members { Ok(()) } else { - Err("try_state checks: Members must be always sorted by account ID") + Err("try_state checks: Members must be always sorted by account ID".into()) } } // [`RunnersUp`] state checks. Invariants: // - Elements are sorted based on weight (worst to best). - fn try_state_runners_up() -> Result<(), &'static str> { + fn try_state_runners_up() -> Result<(), TryRuntimeError> { let mut sorted = RunnersUp::::get(); // worst stake first sorted.sort_by(|a, b| a.stake.cmp(&b.stake)); @@ -1225,27 +1232,28 @@ impl Pallet { if RunnersUp::::get() == sorted { Ok(()) } else { - Err("try_state checks: Runners Up must always be sorted by stake (worst to best)") + Err("try_state checks: Runners Up must always be sorted by stake (worst to best)" + .into()) } } // [`Candidates`] state checks. Invariants: // - Always sorted based on account ID. - fn try_state_candidates() -> Result<(), &'static str> { + fn try_state_candidates() -> Result<(), TryRuntimeError> { let mut candidates = Candidates::::get().clone(); candidates.sort_by_key(|(c, _)| c.clone()); if Candidates::::get() == candidates { Ok(()) } else { - Err("try_state checks: Candidates must be always sorted by account ID") + Err("try_state checks: Candidates must be always sorted by account ID".into()) } } // [`Candidates`] and [`RunnersUp`] state checks. Invariants: // - Candidates and runners-ups sets are disjoint. 
- fn try_state_candidates_runners_up_disjoint() -> Result<(), &'static str> { + fn try_state_candidates_runners_up_disjoint() -> Result<(), TryRuntimeError> { match Self::intersects(&Self::candidates_ids(), &Self::runners_up_ids()) { - true => Err("Candidates and runners up sets should always be disjoint"), + true => Err("Candidates and runners up sets should always be disjoint".into()), false => Ok(()), } } @@ -1253,11 +1261,12 @@ impl Pallet { // [`Members`], [`Candidates`] and [`RunnersUp`] state checks. Invariants: // - Members and candidates sets are disjoint; // - Members and runners-ups sets are disjoint. - fn try_state_members_disjoint() -> Result<(), &'static str> { + fn try_state_members_disjoint() -> Result<(), TryRuntimeError> { match Self::intersects(&Pallet::::members_ids(), &Self::candidates_ids()) && Self::intersects(&Pallet::::members_ids(), &Self::runners_up_ids()) { - true => Err("Members set should be disjoint from candidates and runners-up sets"), + true => + Err("Members set should be disjoint from candidates and runners-up sets".into()), false => Ok(()), } } @@ -1265,14 +1274,14 @@ impl Pallet { // [`Members`], [`RunnersUp`] and approval stake state checks. Invariants: // - Selected members should have approval stake; // - Selected RunnersUp should have approval stake. 
- fn try_state_members_approval_stake() -> Result<(), &'static str> { + fn try_state_members_approval_stake() -> Result<(), TryRuntimeError> { match Members::::get() .iter() .chain(RunnersUp::::get().iter()) .all(|s| s.stake != BalanceOf::::zero()) { true => Ok(()), - false => Err("Members and RunnersUp must have approval stake"), + false => Err("Members and RunnersUp must have approval stake".into()), } } @@ -1314,14 +1323,13 @@ mod tests { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -1347,7 +1355,7 @@ mod tests { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -1416,7 +1424,7 @@ mod tests { type PalletId = ElectionsPhragmenPalletId; type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type CurrencyToVote = (); type ChangeMembers = TestChangeMembers; type InitializeMembers = (); type CandidacyBond = CandidacyBond; @@ -1438,10 +1446,7 @@ mod tests { sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub enum Test { System: frame_system::{Pallet, Call, Event}, Balances: pallet_balances::{Pallet, Call, Event, Config}, @@ -1495,7 +1500,7 @@ mod tests { MEMBERS.with(|m| { *m.borrow_mut() = self.genesis_members.iter().map(|(m, _)| *m).collect::>() }); - let mut ext: sp_io::TestExternalities = GenesisConfig { + let mut ext: sp_io::TestExternalities = RuntimeGenesisConfig { balances: 
pallet_balances::GenesisConfig:: { balances: vec![ (1, 10 * self.balance_factor), diff --git a/frame/elections-phragmen/src/migrations/mod.rs b/frame/elections-phragmen/src/migrations/mod.rs index 3c40c51b1456c..f5403284126a0 100644 --- a/frame/elections-phragmen/src/migrations/mod.rs +++ b/frame/elections-phragmen/src/migrations/mod.rs @@ -17,6 +17,8 @@ //! All migrations of this pallet. +/// Migration to unreserve all pallet funds. +pub mod unlock_and_unreserve_all_funds; /// Version 3. pub mod v3; /// Version 4. diff --git a/frame/elections-phragmen/src/migrations/unlock_and_unreserve_all_funds.rs b/frame/elections-phragmen/src/migrations/unlock_and_unreserve_all_funds.rs new file mode 100644 index 0000000000000..482766ee97f54 --- /dev/null +++ b/frame/elections-phragmen/src/migrations/unlock_and_unreserve_all_funds.rs @@ -0,0 +1,513 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A migration that unreserves all deposit and unlocks all stake held in the context of this +//! pallet. 
+ +use core::iter::Sum; +use frame_support::{ + pallet_prelude::ValueQuery, + storage_alias, + traits::{Currency, LockIdentifier, LockableCurrency, OnRuntimeUpgrade, ReservableCurrency}, + weights::RuntimeDbWeight, + Parameter, Twox64Concat, +}; +use sp_core::Get; +use sp_runtime::traits::Zero; +use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; + +const LOG_TARGET: &str = "elections_phragmen::migrations::unlock_and_unreserve_all_funds"; + +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +/// The configuration for [`UnlockAndUnreserveAllFunds`]. +pub trait UnlockConfig: 'static { + /// The account ID used in the runtime. + type AccountId: Parameter + Ord; + /// The currency type used in the runtime. + /// + /// Should match the currency type previously used for the pallet, if applicable. + type Currency: LockableCurrency + ReservableCurrency; + /// The name of the pallet as previously configured in + /// [`construct_runtime!`](frame_support::construct_runtime). + type PalletName: Get<&'static str>; + /// The maximum number of votes per voter as configured previously in the previous runtime. + type MaxVotesPerVoter: Get; + /// Identifier for the elections-phragmen pallet's lock, as previously configured in the + /// runtime. + type PalletId: Get; + /// The DB weight as configured in the runtime to calculate the correct weight. 
+ type DbWeight: Get; +} + +#[storage_alias(dynamic)] +type Members = StorageValue< + ::PalletName, + Vec::AccountId, BalanceOf>>, + ValueQuery, +>; + +#[storage_alias(dynamic)] +type RunnersUp = StorageValue< + ::PalletName, + Vec::AccountId, BalanceOf>>, + ValueQuery, +>; + +#[storage_alias(dynamic)] +type Candidates = StorageValue< + ::PalletName, + Vec<(::AccountId, BalanceOf)>, + ValueQuery, +>; + +#[storage_alias(dynamic)] +type Voting = StorageMap< + ::PalletName, + Twox64Concat, + ::AccountId, + crate::Voter<::AccountId, BalanceOf>, + ValueQuery, +>; + +/// A migration that unreserves all deposit and unlocks all stake held in the context of this +/// pallet. +/// +/// Useful to prevent funds from being locked up when the pallet is being deprecated. +/// +/// The pallet should be made inoperable before this migration is run. +/// +/// (See also [`RemovePallet`][frame_support::migrations::RemovePallet]) +pub struct UnlockAndUnreserveAllFunds(sp_std::marker::PhantomData); + +impl UnlockAndUnreserveAllFunds { + /// Calculates and returns the total amounts deposited and staked by each account in the context + /// of this pallet. + /// + /// The deposited and staked amounts are returned in two separate `BTreeMap` collections. + /// + /// The first `BTreeMap`, `account_deposited_sums`, contains each account's total amount + /// deposited. This includes deposits made by Members, RunnerUps, Candidates, and Voters. + /// + /// The second `BTreeMap`, `account_staked_sums`, contains each account's total amount staked. + /// This includes stakes made by Voters. + /// + /// # Returns + /// + /// This function returns a tuple of two `BTreeMap` collections and the weight of the reads: + /// + /// * `BTreeMap>`: Map of account IDs to their respective total + /// deposit sums. + /// * `BTreeMap>`: Map of account IDs to their respective total + /// staked sums. + /// * `frame_support::weights::Weight`: The weight of reading the storage. 
+ fn get_account_deposited_and_staked_sums() -> ( + BTreeMap>, + BTreeMap>, + frame_support::weights::Weight, + ) { + use sp_runtime::Saturating; + + let members = Members::::get(); + let runner_ups = RunnersUp::::get(); + let candidates = Candidates::::get(); + + // Get the total amount deposited (Members, RunnerUps, Candidates and Voters all can have + // deposits). + let account_deposited_sums: BTreeMap> = members + // Massage all data structures into (account_id, deposit) tuples. + .iter() + .chain(runner_ups.iter()) + .map(|member| (member.who.clone(), member.deposit)) + .chain(candidates.iter().map(|(candidate, amount)| (candidate.clone(), *amount))) + .chain( + Voting::::iter().map(|(account_id, voter)| (account_id.clone(), voter.deposit)), + ) + // Finally, aggregate the tuples into a Map. + .fold(BTreeMap::new(), |mut acc, (id, deposit)| { + acc.entry(id.clone()).or_insert(Zero::zero()).saturating_accrue(deposit); + acc + }); + + // Get the total amount staked (only Voters stake) and count the number of voters. + let mut voters_len = 0; + let account_staked_sums: BTreeMap> = Voting::::iter() + .map(|(account_id, voter)| (account_id.clone(), voter.stake)) + .fold(BTreeMap::new(), |mut acc, (id, stake)| { + voters_len.saturating_accrue(1); + acc.entry(id.clone()).or_insert(Zero::zero()).saturating_accrue(stake); + acc + }); + + ( + account_deposited_sums, + account_staked_sums, + T::DbWeight::get().reads( + members + .len() + .saturating_add(runner_ups.len()) + .saturating_add(candidates.len()) + .saturating_add(voters_len.saturating_mul(T::MaxVotesPerVoter::get() as usize)) + as u64, + ), + ) + } +} + +impl OnRuntimeUpgrade for UnlockAndUnreserveAllFunds +where + BalanceOf: Sum, +{ + /// Collects pre-migration data useful for validating the migration was successful, and also + /// checks the integrity of deposited and reserved balances. + /// + /// Steps: + /// 1. Gets the deposited and staked balances for each account stored in this pallet. + /// 2. 
Collects actual pre-migration locked and reserved balances for each account. + /// 3. Checks the integrity of the deposited and reserved balances. + /// 4. Prints summary statistics about the state to be migrated. + /// 5. Encodes and returns pre-migration data to be used in post_upgrade. + /// + /// Fails with a `TryRuntimeError` if there's a discrepancy between the amount + /// reported as staked by the pallet and the amount actually locked in `Balances`. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use codec::Encode; + use sp_std::collections::btree_set::BTreeSet; + + // Get staked and deposited balances as reported by this pallet. + let (account_deposited_sums, account_staked_sums, _) = + Self::get_account_deposited_and_staked_sums(); + + let all_accounts: BTreeSet = account_staked_sums + .keys() + .chain(account_deposited_sums.keys()) + .cloned() + .collect(); + + let account_reserved_before: BTreeMap> = all_accounts + .iter() + .map(|account| (account.clone(), T::Currency::reserved_balance(&account))) + .collect(); + + // Total deposited for each account *should* be less than or equal to the total reserved, + // however this does not hold for all cases due to bugs in the reserve logic of this pallet. + let bugged_deposits = all_accounts + .iter() + .filter(|account| { + account_deposited_sums.get(&account).unwrap_or(&Zero::zero()) > + account_reserved_before.get(&account).unwrap_or(&Zero::zero()) + }) + .count(); + + // Print some summary stats. 
+ let total_stake_to_unlock = account_staked_sums.clone().into_values().sum::>(); + let total_deposits_to_unreserve = + account_deposited_sums.clone().into_values().sum::>(); + log::info!(target: LOG_TARGET, "Total accounts: {:?}", all_accounts.len()); + log::info!(target: LOG_TARGET, "Total stake to unlock: {:?}", total_stake_to_unlock); + log::info!( + target: LOG_TARGET, + "Total deposit to unreserve: {:?}", + total_deposits_to_unreserve + ); + if bugged_deposits > 0 { + log::warn!( + target: LOG_TARGET, + "Bugged deposits: {}/{}", + bugged_deposits, + all_accounts.len() + ); + } + + Ok(account_reserved_before.encode()) + } + + /// Executes the migration. + /// + /// Steps: + /// 1. Retrieves the deposit and stake amounts from the pallet. + /// 2. Unreserves the deposited funds for each account. + /// 3. Unlocks the staked funds for each account. + fn on_runtime_upgrade() -> frame_support::weights::Weight { + // Get staked and deposited balances as reported by this pallet. + let (account_deposited_sums, account_staked_sums, initial_reads) = + Self::get_account_deposited_and_staked_sums(); + + // Deposited funds need to be unreserved. + for (account, unreserve_amount) in account_deposited_sums.iter() { + if unreserve_amount.is_zero() { + log::warn!(target: LOG_TARGET, "Unexpected zero amount to unreserve"); + continue + } + T::Currency::unreserve(&account, *unreserve_amount); + } + + // Staked funds need to be unlocked. + for (account, amount) in account_staked_sums.iter() { + if amount.is_zero() { + log::warn!(target: LOG_TARGET, "Unexpected zero amount to unlock"); + continue + } + T::Currency::remove_lock(T::PalletId::get(), account); + } + + T::DbWeight::get() + .reads_writes( + (account_deposited_sums.len().saturating_add(account_staked_sums.len())) as u64, + (account_deposited_sums.len().saturating_add(account_staked_sums.len())) as u64, + ) + .saturating_add(initial_reads) + } + + /// Performs post-upgrade sanity checks: + /// + /// 1. 
All expected locks were removed after the migration. + /// 2. The reserved balance for each account has been reduced by the expected amount. + #[cfg(feature = "try-runtime")] + fn post_upgrade( + account_reserved_before_bytes: Vec, + ) -> Result<(), sp_runtime::TryRuntimeError> { + use codec::Decode; + use sp_runtime::Saturating; + + let account_reserved_before = + BTreeMap::>::decode(&mut &account_reserved_before_bytes[..]) + .map_err(|_| "Failed to decode account_reserved_before_bytes")?; + + // Get deposited balances as reported by this pallet. + let (account_deposited_sums, _, _) = Self::get_account_deposited_and_staked_sums(); + + // Check that the reserved balance is reduced by the expected deposited amount. + for (account, actual_reserved_before) in account_reserved_before { + let actual_reserved_after = T::Currency::reserved_balance(&account); + let expected_amount_deducted = *account_deposited_sums + .get(&account) + .unwrap_or(&Zero::zero()) + // .min here to handle bugged deposits where actual_reserved_before is less than the + // amount the pallet reports is reserved + .min(&actual_reserved_before); + let expected_reserved_after = + actual_reserved_before.saturating_sub(expected_amount_deducted); + assert!( + actual_reserved_after == expected_reserved_after, + "Reserved balance for {:?} is incorrect. actual before: {:?}, actual after, {:?}, expected deducted: {:?}", + account, + actual_reserved_before, + actual_reserved_after, + expected_amount_deducted, + ); + } + + Ok(()) + } +} + +#[cfg(all(feature = "try-runtime", test))] +mod test { + use super::*; + use crate::{ + tests::{Balances, ElectionsPhragmenPalletId, ExtBuilder, PhragmenMaxVoters, Test}, + Candidates, Members, RunnersUp, SeatHolder, Voter, Voting, + }; + use frame_support::{ + assert_ok, parameter_types, + traits::{Currency, OnRuntimeUpgrade, ReservableCurrency, WithdrawReasons}, + }; + + parameter_types! 
{ + const PalletName: &'static str = "Elections"; + } + + struct UnlockConfigImpl; + impl super::UnlockConfig for UnlockConfigImpl { + type Currency = Balances; + type AccountId = u64; + type DbWeight = (); + type PalletName = PalletName; + type MaxVotesPerVoter = PhragmenMaxVoters; + type PalletId = ElectionsPhragmenPalletId; + } + + #[test] + fn unreserve_works_for_candidate() { + let candidate = 10; + let deposit = 100; + let initial_reserved = 15; + let initial_balance = 100_000; + ExtBuilder::default().build_and_execute(|| { + // Set up initial state. + ::Currency::make_free_balance_be(&candidate, initial_balance); + assert_ok!(::Currency::reserve(&candidate, initial_reserved)); + Candidates::::set(vec![(candidate, deposit)]); + assert_ok!(::Currency::reserve(&candidate, deposit)); + + // Sanity check: ensure initial Balance state was set up correctly. + assert_eq!( + ::Currency::reserved_balance(&candidate), + deposit + initial_reserved + ); + + // Run the migration. + let bytes = UnlockAndUnreserveAllFunds::::pre_upgrade() + .unwrap_or_else(|e| panic!("pre_upgrade failed: {:?}", e)); + UnlockAndUnreserveAllFunds::::on_runtime_upgrade(); + assert_ok!(UnlockAndUnreserveAllFunds::::post_upgrade(bytes)); + + // Assert the candidate reserved balance was reduced by the expected amount. + assert_eq!( + ::Currency::reserved_balance(&candidate), + initial_reserved + ); + }); + } + + #[test] + fn unreserve_works_for_runner_up() { + let runner_up = 10; + let deposit = 100; + let initial_reserved = 15; + let initial_balance = 100_000; + ExtBuilder::default().build_and_execute(|| { + // Set up initial state. + ::Currency::make_free_balance_be(&runner_up, initial_balance); + assert_ok!(::Currency::reserve(&runner_up, initial_reserved)); + RunnersUp::::set(vec![SeatHolder { who: runner_up, deposit, stake: 10 }]); + assert_ok!(::Currency::reserve(&runner_up, deposit)); + + // Sanity check: ensure initial Balance state was set up correctly. 
+ assert_eq!( + ::Currency::reserved_balance(&runner_up), + deposit + initial_reserved + ); + + // Run the migration. + let bytes = UnlockAndUnreserveAllFunds::::pre_upgrade() + .unwrap_or_else(|e| panic!("pre_upgrade failed: {:?}", e)); + UnlockAndUnreserveAllFunds::::on_runtime_upgrade(); + assert_ok!(UnlockAndUnreserveAllFunds::::post_upgrade(bytes)); + + // Assert the reserved balance was reduced by the expected amount. + assert_eq!( + ::Currency::reserved_balance(&runner_up), + initial_reserved + ); + }); + } + + #[test] + fn unreserve_works_for_member() { + let member = 10; + let deposit = 100; + let initial_reserved = 15; + let initial_balance = 100_000; + ExtBuilder::default().build_and_execute(|| { + // Set up initial state. + ::Currency::make_free_balance_be(&member, initial_balance); + assert_ok!(::Currency::reserve(&member, initial_reserved)); + Members::::set(vec![SeatHolder { who: member, deposit, stake: 10 }]); + assert_ok!(::Currency::reserve(&member, deposit)); + + // Sanity check: ensure initial Balance state was set up correctly. + assert_eq!( + ::Currency::reserved_balance(&member), + deposit + initial_reserved + ); + + // Run the migration. + let bytes = UnlockAndUnreserveAllFunds::::pre_upgrade() + .unwrap_or_else(|e| panic!("pre_upgrade failed: {:?}", e)); + UnlockAndUnreserveAllFunds::::on_runtime_upgrade(); + assert_ok!(UnlockAndUnreserveAllFunds::::post_upgrade(bytes)); + + // Assert the reserved balance was reduced by the expected amount. + assert_eq!( + ::Currency::reserved_balance(&member), + initial_reserved + ); + }); + } + + #[test] + fn unlock_and_unreserve_works_for_voter() { + let voter = 10; + let deposit = 100; + let initial_reserved = 15; + let initial_locks = vec![(b"somethin", 10)]; + let stake = 25; + let initial_balance = 100_000; + ExtBuilder::default().build_and_execute(|| { + let pallet_id = ::PalletId::get(); + + // Set up initial state. 
+ ::Currency::make_free_balance_be(&voter, initial_balance); + assert_ok!(::Currency::reserve(&voter, initial_reserved)); + for lock in initial_locks.clone() { + ::Currency::set_lock( + *lock.0, + &voter, + lock.1, + WithdrawReasons::all(), + ); + } + Voting::::insert(voter, Voter { votes: vec![], deposit, stake }); + assert_ok!(::Currency::reserve(&voter, deposit)); + ::Currency::set_lock( + ::PalletId::get(), + &voter, + stake, + WithdrawReasons::all(), + ); + + // Sanity check: ensure initial Balance state was set up correctly. + assert_eq!( + ::Currency::reserved_balance(&voter), + deposit + initial_reserved + ); + let mut voter_all_locks = initial_locks.clone(); + voter_all_locks.push((&pallet_id, stake)); + assert_eq!( + ::Currency::locks(&voter) + .iter() + .map(|lock| (&lock.id, lock.amount)) + .collect::>(), + voter_all_locks + ); + + // Run the migration. + let bytes = UnlockAndUnreserveAllFunds::::pre_upgrade() + .unwrap_or_else(|e| panic!("pre_upgrade failed: {:?}", e)); + UnlockAndUnreserveAllFunds::::on_runtime_upgrade(); + assert_ok!(UnlockAndUnreserveAllFunds::::post_upgrade(bytes)); + + // Assert the voter lock was removed and the reserved balance was reduced by the + // expected amount. + assert_eq!( + ::Currency::reserved_balance(&voter), + initial_reserved + ); + assert_eq!( + ::Currency::locks(&voter) + .iter() + .map(|lock| (&lock.id, lock.amount)) + .collect::>(), + initial_locks + ); + }); + } +} diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs index f0ebb8639e259..b7ed13dae9f73 100644 --- a/frame/elections-phragmen/src/weights.rs +++ b/frame/elections-phragmen/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_elections_phragmen //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_elections_phragmen +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_elections_phragmen. pub trait WeightInfo { @@ -82,10 +86,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `403 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 33_623_000 picoseconds. - Weight::from_parts(34_531_239, 4764) - // Standard Error: 1_913 - .saturating_add(Weight::from_parts(131_360, 0).saturating_mul(v.into())) + // Minimum execution time: 33_028_000 picoseconds. + Weight::from_parts(34_073_914, 4764) + // Standard Error: 3_474 + .saturating_add(Weight::from_parts(205_252, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -107,10 +111,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `371 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 46_106_000 picoseconds. 
- Weight::from_parts(47_067_453, 4764) - // Standard Error: 2_441 - .saturating_add(Weight::from_parts(130_306, 0).saturating_mul(v.into())) + // Minimum execution time: 45_725_000 picoseconds. + Weight::from_parts(47_169_586, 4764) + // Standard Error: 5_148 + .saturating_add(Weight::from_parts(213_742, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -132,10 +136,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `403 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 46_094_000 picoseconds. - Weight::from_parts(47_054_638, 4764) - // Standard Error: 2_651 - .saturating_add(Weight::from_parts(137_251, 0).saturating_mul(v.into())) + // Minimum execution time: 45_519_000 picoseconds. + Weight::from_parts(47_339_108, 4764) + // Standard Error: 5_501 + .saturating_add(Weight::from_parts(195_247, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -150,8 +154,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `925` // Estimated: `4764` - // Minimum execution time: 49_652_000 picoseconds. - Weight::from_parts(50_217_000, 4764) + // Minimum execution time: 50_386_000 picoseconds. + Weight::from_parts(51_378_000, 4764) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -166,10 +170,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1570 + c * (48 ±0)` // Estimated: `3055 + c * (48 ±0)` - // Minimum execution time: 37_797_000 picoseconds. 
- Weight::from_parts(38_384_713, 3055) - // Standard Error: 1_008 - .saturating_add(Weight::from_parts(71_486, 0).saturating_mul(c.into())) + // Minimum execution time: 38_987_000 picoseconds. + Weight::from_parts(41_302_276, 3055) + // Standard Error: 2_047 + .saturating_add(Weight::from_parts(125_200, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -181,10 +185,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `285 + c * (48 ±0)` // Estimated: `1770 + c * (48 ±0)` - // Minimum execution time: 31_112_000 picoseconds. - Weight::from_parts(31_660_924, 1770) - // Standard Error: 754 - .saturating_add(Weight::from_parts(48_689, 0).saturating_mul(c.into())) + // Minimum execution time: 33_510_000 picoseconds. + Weight::from_parts(34_947_760, 1770) + // Standard Error: 1_781 + .saturating_add(Weight::from_parts(78_851, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -203,8 +207,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1900` // Estimated: `3385` - // Minimum execution time: 47_487_000 picoseconds. - Weight::from_parts(47_795_000, 3385) + // Minimum execution time: 50_603_000 picoseconds. + Weight::from_parts(51_715_000, 3385) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -214,8 +218,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `880` // Estimated: `2365` - // Minimum execution time: 31_479_000 picoseconds. - Weight::from_parts(32_093_000, 2365) + // Minimum execution time: 33_441_000 picoseconds. 
+ Weight::from_parts(34_812_000, 2365) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -244,8 +248,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1900` // Estimated: `3593` - // Minimum execution time: 53_395_000 picoseconds. - Weight::from_parts(53_952_000, 3593) + // Minimum execution time: 57_289_000 picoseconds. + Weight::from_parts(58_328_000, 3593) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -267,12 +271,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `d` is `[0, 256]`. fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115 + v * (811 ±0)` - // Estimated: `4587 + v * (3774 ±0)` - // Minimum execution time: 18_089_406_000 picoseconds. - Weight::from_parts(18_125_024_000, 4587) - // Standard Error: 296_666 - .saturating_add(Weight::from_parts(42_527_045, 0).saturating_mul(v.into())) + // Measured: `1149 + v * (811 ±0)` + // Estimated: `4621 + v * (3774 ±0)` + // Minimum execution time: 18_774_231_000 picoseconds. + Weight::from_parts(18_933_040_000, 4621) + // Standard Error: 301_534 + .saturating_add(Weight::from_parts(44_306_903, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) @@ -303,12 +307,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + e * (28 ±0) + v * (606 ±0)` // Estimated: `178887 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` - // Minimum execution time: 1_193_774_000 picoseconds. 
- Weight::from_parts(1_196_649_000, 178887) - // Standard Error: 617_531 - .saturating_add(Weight::from_parts(17_672_923, 0).saturating_mul(v.into())) - // Standard Error: 39_622 - .saturating_add(Weight::from_parts(846_866, 0).saturating_mul(e.into())) + // Minimum execution time: 1_281_877_000 picoseconds. + Weight::from_parts(1_288_147_000, 178887) + // Standard Error: 528_851 + .saturating_add(Weight::from_parts(17_761_407, 0).saturating_mul(v.into())) + // Standard Error: 33_932 + .saturating_add(Weight::from_parts(698_277, 0).saturating_mul(e.into())) .saturating_add(T::DbWeight::get().reads(21_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) @@ -339,10 +343,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `403 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 33_623_000 picoseconds. - Weight::from_parts(34_531_239, 4764) - // Standard Error: 1_913 - .saturating_add(Weight::from_parts(131_360, 0).saturating_mul(v.into())) + // Minimum execution time: 33_028_000 picoseconds. + Weight::from_parts(34_073_914, 4764) + // Standard Error: 3_474 + .saturating_add(Weight::from_parts(205_252, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -364,10 +368,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `371 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 46_106_000 picoseconds. - Weight::from_parts(47_067_453, 4764) - // Standard Error: 2_441 - .saturating_add(Weight::from_parts(130_306, 0).saturating_mul(v.into())) + // Minimum execution time: 45_725_000 picoseconds. 
+ Weight::from_parts(47_169_586, 4764) + // Standard Error: 5_148 + .saturating_add(Weight::from_parts(213_742, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -389,10 +393,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `403 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 46_094_000 picoseconds. - Weight::from_parts(47_054_638, 4764) - // Standard Error: 2_651 - .saturating_add(Weight::from_parts(137_251, 0).saturating_mul(v.into())) + // Minimum execution time: 45_519_000 picoseconds. + Weight::from_parts(47_339_108, 4764) + // Standard Error: 5_501 + .saturating_add(Weight::from_parts(195_247, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -407,8 +411,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `925` // Estimated: `4764` - // Minimum execution time: 49_652_000 picoseconds. - Weight::from_parts(50_217_000, 4764) + // Minimum execution time: 50_386_000 picoseconds. + Weight::from_parts(51_378_000, 4764) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -423,10 +427,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1570 + c * (48 ±0)` // Estimated: `3055 + c * (48 ±0)` - // Minimum execution time: 37_797_000 picoseconds. - Weight::from_parts(38_384_713, 3055) - // Standard Error: 1_008 - .saturating_add(Weight::from_parts(71_486, 0).saturating_mul(c.into())) + // Minimum execution time: 38_987_000 picoseconds. 
+ Weight::from_parts(41_302_276, 3055) + // Standard Error: 2_047 + .saturating_add(Weight::from_parts(125_200, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -438,10 +442,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `285 + c * (48 ±0)` // Estimated: `1770 + c * (48 ±0)` - // Minimum execution time: 31_112_000 picoseconds. - Weight::from_parts(31_660_924, 1770) - // Standard Error: 754 - .saturating_add(Weight::from_parts(48_689, 0).saturating_mul(c.into())) + // Minimum execution time: 33_510_000 picoseconds. + Weight::from_parts(34_947_760, 1770) + // Standard Error: 1_781 + .saturating_add(Weight::from_parts(78_851, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -460,8 +464,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1900` // Estimated: `3385` - // Minimum execution time: 47_487_000 picoseconds. - Weight::from_parts(47_795_000, 3385) + // Minimum execution time: 50_603_000 picoseconds. + Weight::from_parts(51_715_000, 3385) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -471,8 +475,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `880` // Estimated: `2365` - // Minimum execution time: 31_479_000 picoseconds. - Weight::from_parts(32_093_000, 2365) + // Minimum execution time: 33_441_000 picoseconds. + Weight::from_parts(34_812_000, 2365) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -501,8 +505,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1900` // Estimated: `3593` - // Minimum execution time: 53_395_000 picoseconds. 
- Weight::from_parts(53_952_000, 3593) + // Minimum execution time: 57_289_000 picoseconds. + Weight::from_parts(58_328_000, 3593) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -524,12 +528,12 @@ impl WeightInfo for () { /// The range of component `d` is `[0, 256]`. fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115 + v * (811 ±0)` - // Estimated: `4587 + v * (3774 ±0)` - // Minimum execution time: 18_089_406_000 picoseconds. - Weight::from_parts(18_125_024_000, 4587) - // Standard Error: 296_666 - .saturating_add(Weight::from_parts(42_527_045, 0).saturating_mul(v.into())) + // Measured: `1149 + v * (811 ±0)` + // Estimated: `4621 + v * (3774 ±0)` + // Minimum execution time: 18_774_231_000 picoseconds. + Weight::from_parts(18_933_040_000, 4621) + // Standard Error: 301_534 + .saturating_add(Weight::from_parts(44_306_903, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(v.into()))) @@ -560,12 +564,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + e * (28 ±0) + v * (606 ±0)` // Estimated: `178887 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` - // Minimum execution time: 1_193_774_000 picoseconds. - Weight::from_parts(1_196_649_000, 178887) - // Standard Error: 617_531 - .saturating_add(Weight::from_parts(17_672_923, 0).saturating_mul(v.into())) - // Standard Error: 39_622 - .saturating_add(Weight::from_parts(846_866, 0).saturating_mul(e.into())) + // Minimum execution time: 1_281_877_000 picoseconds. 
+ Weight::from_parts(1_288_147_000, 178887) + // Standard Error: 528_851 + .saturating_add(Weight::from_parts(17_761_407, 0).saturating_mul(v.into())) + // Standard Error: 33_932 + .saturating_add(Weight::from_parts(698_277, 0).saturating_mul(e.into())) .saturating_add(RocksDbWeight::get().reads(21_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) diff --git a/frame/examples/Cargo.toml b/frame/examples/Cargo.toml new file mode 100644 index 0000000000000..af67bef792b6f --- /dev/null +++ b/frame/examples/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "pallet-examples" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "The single package with various examples for frame pallets" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +pallet-example-basic = { default-features = false, path = "./basic" } +pallet-default-config-example = { default-features = false, path = "./default-config" } +pallet-example-offchain-worker = { default-features = false, path = "./offchain-worker" } +pallet-example-kitchensink = { default-features = false, path = "./kitchensink" } +pallet-dev-mode = { default-features = false, path = "./dev-mode" } +pallet-example-split = { default-features = false, path = "./split" } + +[features] +default = [ "std" ] +std = [ + "pallet-example-basic/std", + "pallet-default-config-example/std", + "pallet-example-offchain-worker/std", + "pallet-example-kitchensink/std", + "pallet-dev-mode/std", + "pallet-example-split/std", +] +try-runtime = [ + "pallet-example-basic/try-runtime", + "pallet-default-config-example/try-runtime", + "pallet-example-offchain-worker/try-runtime", + "pallet-example-kitchensink/try-runtime", + "pallet-dev-mode/try-runtime", + 
"pallet-example-split/try-runtime", +] diff --git a/frame/examples/basic/Cargo.toml b/frame/examples/basic/Cargo.toml index 848d4bbc94d72..60bfa1352f482 100644 --- a/frame/examples/basic/Cargo.toml +++ b/frame/examples/basic/Cargo.toml @@ -13,19 +13,19 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../balances" } -sp-io = { version = "7.0.0", default-features = false, path = "../../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } [dev-dependencies] -sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } [features] default = ["std"] @@ -40,6 +40,18 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "sp-core/std" +] +runtime-benchmarks = [ + 
"frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index af159c0f4bc98..426e9b7ec648c 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -388,21 +388,21 @@ pub mod pallet { // dispatched. // // This function must return the weight consumed by `on_initialize` and `on_finalize`. - fn on_initialize(_n: T::BlockNumber) -> Weight { + fn on_initialize(_n: BlockNumberFor) -> Weight { // Anything that needs to be done at the start of the block. // We don't do anything here. Weight::zero() } // `on_finalize` is executed at the end of block after all extrinsic are dispatched. - fn on_finalize(_n: T::BlockNumber) { + fn on_finalize(_n: BlockNumberFor) { // Perform necessary data/state clean up here. } // A runtime code run after every block and have access to extended set of APIs. // // For instance you can generate extrinsics for the upcoming produced block. - fn offchain_worker(_n: T::BlockNumber) { + fn offchain_worker(_n: BlockNumberFor) { // We don't do anything here. // but we could dispatch extrinsic (transaction/unsigned/inherent) using // sp_io::submit_extrinsic. @@ -627,7 +627,7 @@ pub mod pallet { // The build of genesis for the pallet. 
#[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { >::put(&self.dummy); for (a, b) in &self.bar { diff --git a/frame/examples/basic/src/tests.rs b/frame/examples/basic/src/tests.rs index 1d9cf81a5074c..addf219dc3c39 100644 --- a/frame/examples/basic/src/tests.rs +++ b/frame/examples/basic/src/tests.rs @@ -27,24 +27,19 @@ use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; // Reexport crate as its pallet name for construct_runtime. use crate as pallet_example_basic; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; // For testing the pallet, we construct a mock runtime. frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Example: pallet_example_basic::{Pallet, Call, Storage, Config, Event}, } @@ -56,14 +51,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -89,7 +83,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type 
HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -102,7 +96,7 @@ impl Config for Test { // This function basically just builds a genesis storage key/value store according to // our desired mockup. pub fn new_test_ext() -> sp_io::TestExternalities { - let t = GenesisConfig { + let t = RuntimeGenesisConfig { // We use default for brevity, but you can configure as desired if needed. system: Default::default(), balances: Default::default(), diff --git a/frame/examples/default-config/Cargo.toml b/frame/examples/default-config/Cargo.toml new file mode 100644 index 0000000000000..eac342b736f2e --- /dev/null +++ b/frame/examples/default-config/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "pallet-default-config-example" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "MIT-0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME example pallet demonstrating derive_impl / default_config in action" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +log = { version = "0.4.17", default-features = false } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +frame-support = { default-features = false, path = "../../support" } +frame-system = { default-features = false, path = "../../system" } + +sp-io = { default-features = false, path = "../../../primitives/io" } +sp-runtime = { default-features = false, path = "../../../primitives/runtime" } +sp-std = { default-features = false, path = "../../../primitives/std" } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] +try-runtime = [ + "frame-support/try-runtime", + 
"frame-system/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/examples/default-config/README.md b/frame/examples/default-config/README.md new file mode 100644 index 0000000000000..b1a67a5c16e55 --- /dev/null +++ b/frame/examples/default-config/README.md @@ -0,0 +1,8 @@ +# Default Config Example Pallet + +An example pallet demonstrating the ability to derive default testing configs via +`#[derive_impl]` and `#[pallet::config(with_default)]`. + +Run `cargo doc --package pallet-default-config-example --open` to view this pallet's documentation. + +License: MIT-0 diff --git a/frame/examples/default-config/src/lib.rs b/frame/examples/default-config/src/lib.rs new file mode 100644 index 0000000000000..8715b8c45ff0e --- /dev/null +++ b/frame/examples/default-config/src/lib.rs @@ -0,0 +1,227 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Default Config Pallet Example +//! +//! A simple example of a FRAME pallet that utilizes [`frame_support::derive_impl`] to demonstrate +//! the simpler way to implement `Config` trait of pallets. This example only showcases this in a +//! `mock.rs` environment, but the same applies to a real runtime as well. +//! +//! See the source code of [`tests`] for a real examples. +//! +//! Study the following types: +//! +//! 
- [`pallet::DefaultConfig`], and how it differs from [`pallet::Config`]. +//! - [`pallet::config_preludes::TestDefaultConfig`] and how it implements +//! [`pallet::DefaultConfig`]. +//! - Notice how [`pallet::DefaultConfig`] is independent of [`frame_system::Config`]. + +#![cfg_attr(not(feature = "std"), no_std)] + +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + + /// This pallet is annotated to have a default config. This will auto-generate + /// [`DefaultConfig`]. + /// + /// It will be an identical, but won't have anything that is `#[pallet::no_default]`. + #[pallet::config(with_default)] + pub trait Config: frame_system::Config { + /// The overarching event type. This is coming from the runtime, and cannot have a default. + /// In general, `Runtime*`-oriented types cannot have a sensible default. + #[pallet::no_default] // optional. `RuntimeEvent` is automatically excluded as well. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// An input parameter to this pallet. This value can have a default, because it is not + /// reliant on `frame_system::Config` or the overarching runtime in any way. + type WithDefaultValue: Get; + + /// Same as [`Config::WithDefaultValue`], but we don't intend to define a default for this + /// in our tests below. + type OverwrittenDefaultValue: Get; + + /// An input parameter that relies on `::AccountId`. This can + /// too have a default, as long as as it is present in `frame_system::DefaultConfig`. + type CanDeriveDefaultFromSystem: Get; + + /// We might chose to declare as one that doesn't have a default, for whatever semantical + /// reason. + #[pallet::no_default] + type HasNoDefault: Get; + + /// Some types can technically have no default, such as those the rely on + /// `frame_system::Config` but are not present in `frame_system::DefaultConfig`. For + /// example, a `RuntimeCall` cannot reasonably have a default. 
+ #[pallet::no_default] // if we skip this, there will be a compiler error. + type CannotHaveDefault: Get; + + /// Something that is a normal type, with default. + type WithDefaultType; + + /// Same as [`Config::WithDefaultType`], but we don't intend to define a default for this + /// in our tests below. + type OverwrittenDefaultType; + } + + /// Container for different types that implement [`DefaultConfig`]` of this pallet. + pub mod config_preludes { + // This will help use not need to disambiguate anything when using `derive_impl`. + use super::*; + use frame_support::derive_impl; + + /// A type providing default configurations for this pallet in testing environment. + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + type WithDefaultValue = frame_support::traits::ConstU32<42>; + type OverwrittenDefaultValue = frame_support::traits::ConstU32<42>; + + // `frame_system::config_preludes::TestDefaultConfig` declares account-id as u64. + type CanDeriveDefaultFromSystem = frame_support::traits::ConstU64<42>; + + type WithDefaultType = u32; + type OverwrittenDefaultType = u32; + } + + /// A type providing default configurations for this pallet in another environment. Examples + /// could be a parachain, or a solo-chain. + /// + /// Appropriate derive for `frame_system::DefaultConfig` needs to be provided. In this + /// example, we simple derive `frame_system::config_preludes::TestDefaultConfig` again. 
+ pub struct OtherDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::DefaultConfig for OtherDefaultConfig {} + + #[frame_support::register_default_impl(OtherDefaultConfig)] + impl DefaultConfig for OtherDefaultConfig { + type WithDefaultValue = frame_support::traits::ConstU32<66>; + type OverwrittenDefaultValue = frame_support::traits::ConstU32<66>; + type CanDeriveDefaultFromSystem = frame_support::traits::ConstU64<42>; + type WithDefaultType = u32; + type OverwrittenDefaultType = u32; + } + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::event] + pub enum Event {} +} + +#[cfg(any(test, doc))] +pub mod tests { + use super::*; + use frame_support::{derive_impl, parameter_types}; + use pallet::{self as pallet_default_config_example, config_preludes::*}; + + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub struct Runtime { + System: frame_system, + DefaultPallet: pallet_default_config_example, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + // these items are defined by frame-system as `no_default`, so we must specify them here. + // Note that these are types that actually rely on the outer runtime, and can't sensibly + // have an _independent_ default. + type Block = Block; + type BlockHashCount = frame_support::traits::ConstU64<10>; + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type PalletInfo = PalletInfo; + type OnSetCode = (); + + // all of this is coming from `frame_system::config_preludes::TestDefaultConfig`. 
+ + // type Nonce = u32; + // type BlockNumber = u32; + // type Hash = sp_core::hash::H256; + // type Hashing = sp_runtime::traits::BlakeTwo256; + // type AccountId = u64; + // type Lookup = sp_runtime::traits::IdentityLookup; + // type BlockHashCount = frame_support::traits::ConstU32<10>; + // type MaxConsumers = frame_support::traits::ConstU32<16>; + // type AccountData = (); + // type OnNewAccount = (); + // type OnKilledAccount = (); + // type SystemWeightInfo = (); + // type SS58Prefix = (); + // type Version = (); + // type BlockWeights = (); + // type BlockLength = (); + // type DbWeight = (); + + // you could still overwrite any of them if desired. + type SS58Prefix = frame_support::traits::ConstU16<456>; + } + + parameter_types! { + pub const SomeCall: RuntimeCall = RuntimeCall::System(frame_system::Call::::remark { remark: vec![] }); + } + + #[derive_impl(TestDefaultConfig as pallet::DefaultConfig)] + impl pallet_default_config_example::Config for Runtime { + // These two both cannot have defaults. + type RuntimeEvent = RuntimeEvent; + + type HasNoDefault = frame_support::traits::ConstU32<1>; + type CannotHaveDefault = SomeCall; + + type OverwrittenDefaultValue = frame_support::traits::ConstU32<678>; + type OverwrittenDefaultType = u128; + } + + #[test] + fn it_works() { + use frame_support::traits::Get; + use pallet::{Config, DefaultConfig}; + + // assert one of the value types that is not overwritten. + assert_eq!( + <::WithDefaultValue as Get>::get(), + <::WithDefaultValue as Get>::get() + ); + + // assert one of the value types that is overwritten. + assert_eq!(<::OverwrittenDefaultValue as Get>::get(), 678u32); + + // assert one of the types that is not overwritten. + assert_eq!( + std::any::TypeId::of::<::WithDefaultType>(), + std::any::TypeId::of::<::WithDefaultType>() + ); + + // assert one of the types that is overwritten. 
+ assert_eq!( + std::any::TypeId::of::<::OverwrittenDefaultType>(), + std::any::TypeId::of::() + ) + } +} diff --git a/frame/examples/dev-mode/Cargo.toml b/frame/examples/dev-mode/Cargo.toml index a1f8de4b09d74..66b87a5b5245e 100644 --- a/frame/examples/dev-mode/Cargo.toml +++ b/frame/examples/dev-mode/Cargo.toml @@ -13,18 +13,18 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../balances" } -sp-io = { version = "7.0.0", default-features = false, path = "../../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } [dev-dependencies] -sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } [features] default = ["std"] @@ -38,5 +38,11 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "sp-core/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + 
"pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/examples/dev-mode/src/lib.rs b/frame/examples/dev-mode/src/lib.rs index dbc302f4997d4..d57e7a5b76b82 100644 --- a/frame/examples/dev-mode/src/lib.rs +++ b/frame/examples/dev-mode/src/lib.rs @@ -60,8 +60,8 @@ pub mod pallet { #[pallet::call] impl Pallet { - #[pallet::call_index(0)] - /// No need to define a `weight` attribute here because of `dev_mode`. + // No need to define a `call_index` attribute here because of `dev_mode`. + // No need to define a `weight` attribute here because of `dev_mode`. pub fn add_dummy(origin: OriginFor, id: T::AccountId) -> DispatchResult { ensure_root(origin)?; @@ -78,8 +78,8 @@ pub mod pallet { Ok(()) } - #[pallet::call_index(1)] - /// No need to define a `weight` attribute here because of `dev_mode`. + // No need to define a `call_index` attribute here because of `dev_mode`. + // No need to define a `weight` attribute here because of `dev_mode`. pub fn set_bar( origin: OriginFor, #[pallet::compact] new_value: T::Balance, diff --git a/frame/examples/dev-mode/src/tests.rs b/frame/examples/dev-mode/src/tests.rs index e2f06ddda6cd7..ba98f5174ce20 100644 --- a/frame/examples/dev-mode/src/tests.rs +++ b/frame/examples/dev-mode/src/tests.rs @@ -21,24 +21,19 @@ use crate::*; use frame_support::{assert_ok, traits::ConstU64}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; // Reexport crate as its pallet name for construct_runtime. use crate as pallet_dev_mode; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; // For testing the pallet, we construct a mock runtime. 
frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Example: pallet_dev_mode::{Pallet, Call, Storage, Event}, } @@ -50,14 +45,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -83,7 +77,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = RuntimeHoldReason; type MaxHolds = (); } @@ -94,7 +88,7 @@ impl Config for Test { // This function basically just builds a genesis storage key/value store according to // our desired mockup. pub fn new_test_ext() -> sp_io::TestExternalities { - let t = GenesisConfig { + let t = RuntimeGenesisConfig { // We use default for brevity, but you can configure as desired if needed. 
system: Default::default(), balances: Default::default(), diff --git a/frame/examples/kitchensink/Cargo.toml b/frame/examples/kitchensink/Cargo.toml new file mode 100644 index 0000000000000..0537f497b64df --- /dev/null +++ b/frame/examples/kitchensink/Cargo.toml @@ -0,0 +1,64 @@ +[package] +name = "pallet-example-kitchensink" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "MIT-0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME example kitchensink pallet" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +log = { version = "0.4.17", default-features = false } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } + +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } + +sp-io = { version = "23.0.0", default-features = false, path = "../../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } + +frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../../benchmarking" } + +pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../balances" } + +[dev-dependencies] +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } + +[features] +default = ["std"] +std = [ + "codec/std", + "log/std", + "scale-info/std", + + "frame-support/std", + "frame-system/std", + + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + + "frame-benchmarking?/std", + + "pallet-balances/std", + "sp-core/std" +] +runtime-benchmarks = [ + 
"frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/examples/kitchensink/src/benchmarking.rs b/frame/examples/kitchensink/src/benchmarking.rs new file mode 100644 index 0000000000000..24da581fc967b --- /dev/null +++ b/frame/examples/kitchensink/src/benchmarking.rs @@ -0,0 +1,66 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarking for `pallet-example-kitchensink`. + +// Only enable this module for benchmarking. +#![cfg(feature = "runtime-benchmarks")] +use super::*; + +#[allow(unused)] +use crate::Pallet as Kitchensink; + +use frame_benchmarking::v2::*; +use frame_system::RawOrigin; + +// To actually run this benchmark on pallet-example-kitchensink, we need to put this pallet into the +// runtime and compile it with `runtime-benchmarks` feature. The detail procedures are +// documented at: +// https://docs.substrate.io/reference/how-to-guides/weights/add-benchmarks/ +// +// The auto-generated weight estimate of this pallet is copied over to the `weights.rs` file. 
+// The exact command of how the estimate generated is printed at the top of the file. + +// Details on using the benchmarks macro can be seen at: +// https://paritytech.github.io/substrate/master/frame_benchmarking/trait.Benchmarking.html#tymethod.benchmarks +#[benchmarks] +mod benchmarks { + use super::*; + + // This will measure the execution time of `set_foo`. + #[benchmark] + fn set_foo_benchmark() { + // This is the benchmark setup phase. + // `set_foo` is a constant time function, hence we hard-code some random value here. + let value = 1000u32.into(); + #[extrinsic_call] + set_foo(RawOrigin::Root, value, 10u128); // The execution phase is just running `set_foo` extrinsic call + + // This is the optional benchmark verification phase, asserting certain states. + assert_eq!(Pallet::::foo(), Some(value)) + } + + // This line generates test cases for benchmarking, and could be run by: + // `cargo test -p pallet-example-kitchensink --all-features`, you will see one line per case: + // `test benchmarking::bench_sort_vector ... ok` + // `test benchmarking::bench_accumulate_dummy ... ok` + // `test benchmarking::bench_set_dummy_benchmark ... ok` in the result. + // + // The line generates three steps per benchmark, with repeat=1 and the three steps are + // [low, mid, high] of the range. + impl_benchmark_test_suite!(Kitchensink, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/examples/kitchensink/src/lib.rs b/frame/examples/kitchensink/src/lib.rs new file mode 100644 index 0000000000000..0fbffc971da62 --- /dev/null +++ b/frame/examples/kitchensink/src/lib.rs @@ -0,0 +1,330 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Kitchensink Example Pallet +//! +//! **This pallet serves as an example and is not meant to be used in production.** +//! +//! The kitchen-sink catalog of the FRAME macros and their various syntax options. +//! +//! This example does not focus on pallet instancing, `dev_mode`, and does not include any 'where' +//! clauses on `T`. These will both incur additional complexity to the syntax, but are not discussed +//! here. + +#![cfg_attr(not(feature = "std"), no_std)] + +// Re-export pallet items so that they can be accessed from the crate namespace. +pub use pallet::*; + +#[cfg(test)] +mod tests; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + +pub mod weights; +pub use weights::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// The config trait of the pallet. You can basically do anything with the config trait that you + /// can do with a normal rust trait: import items consisting of types, constants and functions. + /// + /// A very common pattern is for a pallet to import implementations of traits such as + /// [`frame_support::traits::Currency`], [`frame_support::traits::fungibles::Inspect`] and + /// [`frame_support::traits::Get`]. These are all types that the pallet is delegating to the top + /// level runtime to provide to it. 
+ /// + /// The `FRAME`-specific syntax are: + /// + /// * the use of `#[pallet::constant]`([`frame_support::procedural`]), which places a `Get` + /// implementation in the metadata. + /// * `type RuntimeEvent`, which is mandatory if your pallet has events. See TODO. + /// * Needless to say, because [`Config`] is bounded by [`frame_system::Config`], you can use + /// all the items from [`frame_system::Config`] as well, such as `AccountId`. + /// * `#[pallet::disable_frame_system_supertrait_check]` would remove the need for + /// `frame_system::Config` to exist, which you should almost never need. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching runtime event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// Type representing the weight of this pallet + type WeightInfo: WeightInfo; + + /// This is a normal Rust type, nothing specific to FRAME here. + type Currency: frame_support::traits::tokens::fungible::Inspect; + + /// Similarly, let the runtime decide this. + fn some_function() -> u32; + + /// And this + const FOO: u32; + + /// This is a FRAME-specific item. It will be placed in the metadata of the pallet, and + /// therefore can be queried by offchain applications. + #[pallet::constant] + type InMetadata: Get; + } + + /// Allows you to define some extra constants to be added into constant metadata. + #[pallet::extra_constants] + impl Pallet { + #[allow(non_snake_case)] + fn SomeValue() -> u32 { + unimplemented!() + } + + #[pallet::constant_name(OtherValue)] + fn arbitrary_name() -> u32 { + unimplemented!() + } + } + + const STORAGE_VERSION: frame_support::traits::StorageVersion = StorageVersion::new(1); + + /// The pallet struct. There's nothing special to FRAME about this; it can implement functions + /// in an impl blocks, traits and so on. 
+ #[pallet::pallet] + #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + /// Allows you to define some origin for the pallet. + #[pallet::origin] + pub type Origin = frame_system::RawOrigin<::AccountId>; + + // first, we showcase all the possible storage types, with most of their details. + + /// A storage value. We mark this as unbounded, alter its prefix, and define a custom storage + /// getter for it. + /// + /// The value is stored a single trie node, and therefore can be retrieved with a single + /// database access. + #[pallet::storage] + #[pallet::unbounded] // optional + #[pallet::storage_prefix = "OtherFoo"] // optional + #[pallet::getter(fn foo)] // optional + pub type Foo = StorageValue; + + #[pallet::type_value] + pub fn DefaultForFoo() -> u32 { + 1 + } + + #[pallet::storage] + pub type FooWithDefault = + StorageValue; + + /// A storage map. This creates a mapping from keys of type `u32` to values of type `u32`. + /// + /// Keys and values can be iterated, albeit each value is stored under a unique trie key, + /// meaning that an iteration consists of many database accesses. + #[pallet::storage] + pub type Bar = StorageMap; + + /// Conceptually same as `StorageMap<>` where the key is a tuple of `(u32, u32)`. On top, it + /// provides some functions to iterate or remove items based on only the first key. + #[pallet::storage] + pub type Qux = StorageDoubleMap< + Hasher1 = Blake2_128Concat, + Key1 = u32, + Hasher2 = Blake2_128Concat, + Key2 = u32, + Value = u32, + >; + + /// Same as `StorageDoubleMap`, but with arbitrary number of keys. + #[pallet::storage] + pub type Quux = StorageNMap< + Key = ( + NMapKey, + NMapKey, + NMapKey, + ), + Value = u64, + >; + + /// In all of these examples, we chose a syntax where the storage item is defined using the + /// explicit generic syntax (`X = Y`). 
Alternatively: + #[pallet::storage] + pub type AlternativeSyntax = StorageMap<_, Blake2_128Concat, u32, u32>; + + /// Lastly, all storage items, as you saw, had to be generic over `T`. If they want to use an + /// item from `Config`, `` should be used. + #[pallet::storage] + pub type AlternativeSyntax2 = StorageMap<_, Blake2_128Concat, T::AccountId, u32>; + + /// The genesis config type. This allows the pallet to define how it should be initialized upon + /// genesis. + /// + /// It can be generic over `T` or not, depending on whether its fields require `T` or not. + #[pallet::genesis_config] + pub struct GenesisConfig { + pub foo: u32, + pub bar: BlockNumberFor, + } + + impl Default for GenesisConfig { + fn default() -> Self { + Self { foo: 0, bar: Default::default() } + } + } + + /// Allows you to define how `genesis_configuration` is built. + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + Foo::::put(self.foo); + } + } + + /// The call declaration. This states the entry points that we handle. The + /// macro takes care of the marshalling of arguments and dispatch. + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::set_foo_benchmark())] + pub fn set_foo( + _: OriginFor, + new_foo: u32, + #[pallet::compact] _other_compact: u128, + ) -> DispatchResult { + Foo::::set(Some(new_foo)); + + Ok(()) + } + } + + /// The event type. This is exactly like a normal Rust enum. + /// + /// It can or cannot be generic over ``. Note that unlike a normal enum, if none of + /// the variants actually use ``, the macro will generate a hidden `PhantomData` + /// variant. + /// + /// The `generate_deposit` macro generates a function on `Pallet` called `deposit_event` which + /// will properly convert the event type of your pallet into `RuntimeEvent` (recall `type + /// RuntimeEvent: From>`, so it can be converted) and deposit it via + /// `frame_system::Pallet::deposit_event`. 
+ #[pallet::event] + #[pallet::generate_deposit(pub fn deposit_event)] + pub enum Event { + /// A simple tuple style variant. + SomethingHappened(u32), + /// A simple struct-style variant. Note that we use `AccountId` from `T` because `T: + /// Config`, which by extension implies `T: frame_system::Config`. + SomethingDetailedHappened { at: u32, to: T::AccountId }, + /// Another variant. + SomeoneJoined(T::AccountId), + } + + /// The error enum. Must always be generic over ``, which is expanded to ``. + #[pallet::error] + pub enum Error { + SomethingWentWrong, + SomethingBroke, + } + + /// All the possible hooks that a pallet can have. See [`frame_support::traits::Hooks`] for more + /// info. + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() {} + + fn offchain_worker(_n: BlockNumberFor) { + unimplemented!() + } + + fn on_initialize(_n: BlockNumberFor) -> Weight { + unimplemented!() + } + + fn on_finalize(_n: BlockNumberFor) { + unimplemented!() + } + + fn on_idle(_n: BlockNumberFor, _remaining_weight: Weight) -> Weight { + unimplemented!() + } + + fn on_runtime_upgrade() -> Weight { + unimplemented!() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + unimplemented!() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { + unimplemented!() + } + + #[cfg(feature = "try-runtime")] + fn try_state(_n: BlockNumberFor) -> Result<(), TryRuntimeError> { + unimplemented!() + } + } + + /// Allows you to define an enum on the pallet which will then instruct + /// `construct_runtime` to amalgamate all similarly-named enums from other + /// pallets into an aggregate enum. + #[pallet::composite_enum] + pub enum HoldReason { + Staking, + } + + /// Allows the pallet to validate some unsigned transaction. See + /// [`sp_runtime::traits::ValidateUnsigned`] for more info. 
+ #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned(_: TransactionSource, _: &Self::Call) -> TransactionValidity { + unimplemented!() + } + + fn pre_dispatch(_: &Self::Call) -> Result<(), TransactionValidityError> { + unimplemented!() + } + } + + /// Allows the pallet to provide some inherent. See [`frame_support::inherent::ProvideInherent`] + /// for more info. + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = MakeFatalError<()>; + + const INHERENT_IDENTIFIER: [u8; 8] = *b"test1234"; + + fn create_inherent(_data: &InherentData) -> Option { + unimplemented!(); + } + + fn is_inherent(_call: &Self::Call) -> bool { + unimplemented!() + } + } +} diff --git a/frame/examples/kitchensink/src/tests.rs b/frame/examples/kitchensink/src/tests.rs new file mode 100644 index 0000000000000..b2af7c8983f56 --- /dev/null +++ b/frame/examples/kitchensink/src/tests.rs @@ -0,0 +1,111 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for pallet-example-kitchensink. + +use crate::*; +use frame_support::{assert_ok, derive_impl, parameter_types, traits::ConstU64}; +use sp_runtime::BuildStorage; +// Reexport crate as its pallet name for construct_runtime. 
+use crate as pallet_example_kitchensink; + +type Block = frame_system::mocking::MockBlock; + +// For testing the pallet, we construct a mock runtime. +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Kitchensink: pallet_example_kitchensink::{Pallet, Call, Storage, Config, Event}, + } +); + +/// Using a default config for [`frame_system`] in tests. See `default-config` example for more +/// details. +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type Block = Block; + type BlockHashCount = ConstU64<10>; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type PalletInfo = PalletInfo; + type OnSetCode = (); + + type AccountData = pallet_balances::AccountData; +} + +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = u64; + type DustRemoval = (); + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type RuntimeHoldReason = (); + type MaxHolds = (); +} + +parameter_types! { + pub const InMetadata: u32 = 30; +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + + type Currency = Balances; + type InMetadata = InMetadata; + + const FOO: u32 = 100; + + fn some_function() -> u32 { + 5u32 + } +} + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = RuntimeGenesisConfig { + // We use default for brevity, but you can configure as desired if needed. 
+ system: Default::default(), + balances: Default::default(), + kitchensink: pallet_example_kitchensink::GenesisConfig { bar: 32, foo: 24 }, + } + .build_storage() + .unwrap(); + t.into() +} + +#[test] +fn set_foo_works() { + new_test_ext().execute_with(|| { + assert_eq!(Foo::::get(), Some(24)); // From genesis config. + + let val1 = 42; + assert_ok!(Kitchensink::set_foo(RuntimeOrigin::root(), val1, 2)); + assert_eq!(Foo::::get(), Some(val1)); + }); +} diff --git a/frame/examples/kitchensink/src/weights.rs b/frame/examples/kitchensink/src/weights.rs new file mode 100644 index 0000000000000..1d083a9b80eea --- /dev/null +++ b/frame/examples/kitchensink/src/weights.rs @@ -0,0 +1,68 @@ + +//! Autogenerated weights for `pallet_example_kitchensink` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-02, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `MacBook.local`, CPU: `` +//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/node-template +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_example_kitchensink +// --extrinsic +// * +// --steps +// 20 +// --repeat +// 10 +// --output +// frame/examples/kitchensink/src/weights.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for pallet_template. +pub trait WeightInfo { + fn set_foo_benchmark() -> Weight; +} + +/// Weight functions for `pallet_example_kitchensink`. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: Kitchensink OtherFoo (r:0 w:1) + /// Proof Skipped: Kitchensink OtherFoo (max_values: Some(1), max_size: None, mode: Measured) + fn set_foo_benchmark() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_000_000 picoseconds. + Weight::from_parts(1_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} + +impl WeightInfo for () { + /// Storage: Kitchensink OtherFoo (r:0 w:1) + /// Proof Skipped: Kitchensink OtherFoo (max_values: Some(1), max_size: None, mode: Measured) + fn set_foo_benchmark() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_000_000 picoseconds. + Weight::from_parts(1_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(RocksDbWeight::get().writes(1)) + } +} diff --git a/frame/examples/offchain-worker/Cargo.toml b/frame/examples/offchain-worker/Cargo.toml index e582b0f99714f..dd3f8e070d583 100644 --- a/frame/examples/offchain-worker/Cargo.toml +++ b/frame/examples/offchain-worker/Cargo.toml @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } lite-json = { version = "0.2.0", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } -sp-io = { version = 
"7.0.0", default-features = false, path = "../../../primitives/io" } -sp-keystore = { version = "0.13.0", optional = true, path = "../../../primitives/keystore" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../../primitives/io" } +sp-keystore = { version = "0.27.0", optional = true, path = "../../../primitives/keystore" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } [features] default = ["std"] @@ -36,8 +36,12 @@ std = [ "scale-info/std", "sp-core/std", "sp-io/std", - "sp-keystore", - "sp-runtime/std", - "sp-std/std", + "sp-keystore/std", + "sp-runtime/std", + "sp-std/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/examples/offchain-worker/src/lib.rs b/frame/examples/offchain-worker/src/lib.rs index 6ce8524174200..6c1fa6ea8ec42 100644 --- a/frame/examples/offchain-worker/src/lib.rs +++ b/frame/examples/offchain-worker/src/lib.rs @@ -53,6 +53,7 @@ use frame_system::{ AppCrypto, CreateSignedTransaction, SendSignedTransaction, SendUnsignedTransaction, SignedPayload, Signer, SigningTypes, SubmitTransaction, }, + pallet_prelude::BlockNumberFor, }; use lite_json::json::JsonValue; use sp_core::crypto::KeyTypeId; @@ -136,14 +137,14 @@ pub mod pallet { /// every `GRACE_PERIOD` blocks. We use Local Storage to coordinate /// sending between distinct runs of this offchain worker. 
#[pallet::constant] - type GracePeriod: Get; + type GracePeriod: Get>; /// Number of blocks of cooldown after unsigned transaction is included. /// /// This ensures that we only accept unsigned transactions once, every `UnsignedInterval` /// blocks. #[pallet::constant] - type UnsignedInterval: Get; + type UnsignedInterval: Get>; /// A configuration for base priority of unsigned transactions. /// @@ -171,7 +172,7 @@ pub mod pallet { /// be cases where some blocks are skipped, or for some the worker runs twice (re-orgs), /// so the code should be able to handle that. /// You can use `Local Storage` API to coordinate runs of the worker. - fn offchain_worker(block_number: T::BlockNumber) { + fn offchain_worker(block_number: BlockNumberFor) { // Note that having logs compiled to WASM may cause the size of the blob to increase // significantly. You can use `RuntimeDebug` custom derive to hide details of the types // in WASM. The `sp-api` crate also provides a feature `disable-logging` to disable @@ -258,7 +259,7 @@ pub mod pallet { #[pallet::weight({0})] pub fn submit_price_unsigned( origin: OriginFor, - _block_number: T::BlockNumber, + _block_number: BlockNumberFor, price: u32, ) -> DispatchResultWithPostInfo { // This ensures that the function can only be called via unsigned transaction. @@ -275,7 +276,7 @@ pub mod pallet { #[pallet::weight({0})] pub fn submit_price_unsigned_with_signed_payload( origin: OriginFor, - price_payload: PricePayload, + price_payload: PricePayload>, _signature: T::Signature, ) -> DispatchResultWithPostInfo { // This ensures that the function can only be called via unsigned transaction. @@ -341,7 +342,7 @@ pub mod pallet { /// This storage entry defines when new transaction is going to be accepted. 
#[pallet::storage] #[pallet::getter(fn next_unsigned_at)] - pub(super) type NextUnsignedAt = StorageValue<_, T::BlockNumber, ValueQuery>; + pub(super) type NextUnsignedAt = StorageValue<_, BlockNumberFor, ValueQuery>; } /// Payload used by this example crate to hold price @@ -353,7 +354,7 @@ pub struct PricePayload { public: Public, } -impl SignedPayload for PricePayload { +impl SignedPayload for PricePayload> { fn public(&self) -> T::Public { self.public.clone() } @@ -374,7 +375,7 @@ impl Pallet { /// and local storage usage. /// /// Returns a type of transaction that should be produced in current run. - fn choose_transaction_type(block_number: T::BlockNumber) -> TransactionType { + fn choose_transaction_type(block_number: BlockNumberFor) -> TransactionType { /// A friendlier name for the error that is going to be returned in case we are in the grace /// period. const RECENTLY_SENT: () = (); @@ -389,16 +390,17 @@ impl Pallet { // low-level method of local storage API, which means that only one worker // will be able to "acquire a lock" and send a transaction if multiple workers // happen to be executed concurrently. - let res = val.mutate(|last_send: Result, StorageRetrievalError>| { - match last_send { - // If we already have a value in storage and the block number is recent enough - // we avoid sending another transaction at this time. - Ok(Some(block)) if block_number < block + T::GracePeriod::get() => - Err(RECENTLY_SENT), - // In every other case we attempt to acquire the lock and send a transaction. - _ => Ok(block_number), - } - }); + let res = + val.mutate(|last_send: Result>, StorageRetrievalError>| { + match last_send { + // If we already have a value in storage and the block number is recent enough + // we avoid sending another transaction at this time. + Ok(Some(block)) if block_number < block + T::GracePeriod::get() => + Err(RECENTLY_SENT), + // In every other case we attempt to acquire the lock and send a transaction. 
+ _ => Ok(block_number), + } + }); // The result of `mutate` call will give us a nested `Result` type. // The first one matches the return of the closure passed to `mutate`, i.e. @@ -419,9 +421,9 @@ impl Pallet { let transaction_type = block_number % 4u32.into(); if transaction_type == Zero::zero() { TransactionType::Signed - } else if transaction_type == T::BlockNumber::from(1u32) { + } else if transaction_type == BlockNumberFor::::from(1u32) { TransactionType::UnsignedForAny - } else if transaction_type == T::BlockNumber::from(2u32) { + } else if transaction_type == BlockNumberFor::::from(2u32) { TransactionType::UnsignedForAll } else { TransactionType::Raw @@ -472,7 +474,9 @@ impl Pallet { } /// A helper function to fetch the price and send a raw unsigned transaction. - fn fetch_price_and_send_raw_unsigned(block_number: T::BlockNumber) -> Result<(), &'static str> { + fn fetch_price_and_send_raw_unsigned( + block_number: BlockNumberFor, + ) -> Result<(), &'static str> { // Make sure we don't fetch the price if unsigned transaction is going to be rejected // anyway. let next_unsigned_at = >::get(); @@ -505,7 +509,7 @@ impl Pallet { /// A helper function to fetch the price, sign payload and send an unsigned transaction fn fetch_price_and_send_unsigned_for_any_account( - block_number: T::BlockNumber, + block_number: BlockNumberFor, ) -> Result<(), &'static str> { // Make sure we don't fetch the price if unsigned transaction is going to be rejected // anyway. @@ -535,7 +539,7 @@ impl Pallet { /// A helper function to fetch the price, sign payload and send an unsigned transaction fn fetch_price_and_send_unsigned_for_all_accounts( - block_number: T::BlockNumber, + block_number: BlockNumberFor, ) -> Result<(), &'static str> { // Make sure we don't fetch the price if unsigned transaction is going to be rejected // anyway. 
@@ -669,7 +673,7 @@ impl Pallet { } fn validate_transaction_parameters( - block_number: &T::BlockNumber, + block_number: &BlockNumberFor, new_price: &u32, ) -> TransactionValidity { // Now let's check if the transaction has any chance to succeed. diff --git a/frame/examples/offchain-worker/src/tests.rs b/frame/examples/offchain-worker/src/tests.rs index 3df7f4a8d5439..203a59a8af03c 100644 --- a/frame/examples/offchain-worker/src/tests.rs +++ b/frame/examples/offchain-worker/src/tests.rs @@ -30,22 +30,18 @@ use sp_core::{ use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; use sp_runtime::{ - testing::{Header, TestXt}, + testing::TestXt, traits::{BlakeTwo256, Extrinsic as ExtrinsicT, IdentifyAccount, IdentityLookup, Verify}, RuntimeAppPublic, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; // For testing the module, we construct a mock runtime. frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Example: example_offchain_worker::{Pallet, Call, Storage, Event, ValidateUnsigned}, } ); @@ -57,13 +53,12 @@ impl frame_system::Config for Test { type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = sp_core::sr25519::Public; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -274,7 +269,7 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { let signature_valid = ::Public, - ::BlockNumber, + frame_system::pallet_prelude::BlockNumberFor, > as 
SignedPayload>::verify::(&price_payload, signature); assert!(signature_valid); @@ -328,7 +323,7 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { let signature_valid = ::Public, - ::BlockNumber, + frame_system::pallet_prelude::BlockNumberFor, > as SignedPayload>::verify::(&price_payload, signature); assert!(signature_valid); diff --git a/frame/examples/split/Cargo.toml b/frame/examples/split/Cargo.toml new file mode 100644 index 0000000000000..3ef5b5a070be4 --- /dev/null +++ b/frame/examples/split/Cargo.toml @@ -0,0 +1,55 @@ +[package] +name = "pallet-example-split" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "MIT-0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME example splitted pallet" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +log = { version = "0.4.17", default-features = false } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } + +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } + +sp-io = { version = "23.0.0", default-features = false, path = "../../../primitives/io" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } + +frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../../benchmarking" } + +[dev-dependencies] +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } + +[features] +default = ["std"] +std = [ + "codec/std", + "log/std", + "scale-info/std", + + "frame-support/std", + "frame-system/std", + + "sp-io/std", + "sp-std/std", + + "frame-benchmarking?/std", + "sp-core/std" +] 
+runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime" +] diff --git a/frame/examples/split/README.md b/frame/examples/split/README.md new file mode 100644 index 0000000000000..413ce9b913cb9 --- /dev/null +++ b/frame/examples/split/README.md @@ -0,0 +1,10 @@ + +# Basic Example For Splitting A Pallet +A simple example of a FRAME pallet demonstrating the ability to split sections across multiple +files. + +Note that this is purely experimental at this point. + +Run `cargo doc --package pallet-example-split --open` to view this pallet's documentation. + +License: MIT-0 diff --git a/frame/examples/split/src/benchmarking.rs b/frame/examples/split/src/benchmarking.rs new file mode 100644 index 0000000000000..5a50300937203 --- /dev/null +++ b/frame/examples/split/src/benchmarking.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarking setup for pallet-example-split + +// Only enable this module for benchmarking. 
+#![cfg(feature = "runtime-benchmarks")] +use super::*; + +#[allow(unused)] +use crate::Pallet as Template; +use frame_benchmarking::v2::*; +use frame_system::RawOrigin; + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn do_something() { + let value = 100u32.into(); + let caller: T::AccountId = whitelisted_caller(); + #[extrinsic_call] + do_something(RawOrigin::Signed(caller), value); + + assert_eq!(Something::::get(), Some(value)); + } + + #[benchmark] + fn cause_error() { + Something::::put(100u32); + let caller: T::AccountId = whitelisted_caller(); + #[extrinsic_call] + cause_error(RawOrigin::Signed(caller)); + + assert_eq!(Something::::get(), Some(101u32)); + } + + impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/examples/split/src/events.rs b/frame/examples/split/src/events.rs new file mode 100644 index 0000000000000..7560766bacb33 --- /dev/null +++ b/frame/examples/split/src/events.rs @@ -0,0 +1,31 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::pallet_macros::*; + +/// A [`pallet_section`] that defines the events for a pallet. +/// This can later be imported into the pallet using [`import_section`]. 
+#[pallet_section] +mod events { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Event documentation should end with an array that provides descriptive names for event + /// parameters. [something, who] + SomethingStored { something: u32, who: T::AccountId }, + } +} diff --git a/frame/examples/split/src/lib.rs b/frame/examples/split/src/lib.rs new file mode 100644 index 0000000000000..74d2e0cc24b7b --- /dev/null +++ b/frame/examples/split/src/lib.rs @@ -0,0 +1,123 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Split Example Pallet +//! +//! **This pallet serves as an example and is not meant to be used in production.** +//! +//! A FRAME pallet demonstrating the ability to split sections across multiple files. +//! +//! Note that this is purely experimental at this point. + +#![cfg_attr(not(feature = "std"), no_std)] + +// Re-export pallet items so that they can be accessed from the crate namespace. +pub use pallet::*; + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +mod events; + +pub mod weights; +pub use weights::*; + +use frame_support::pallet_macros::*; + +/// Imports a [`pallet_section`] defined at [`events::events`]. +/// This brings the events defined in that section into the pallet's namespace. 
+#[import_section(events::events)] +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + /// Configure the pallet by specifying the parameters and types on which it depends. + #[pallet::config] + pub trait Config: frame_system::Config { + /// Because this pallet emits events, it depends on the runtime's definition of an event. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// Type representing the weight of this pallet + type WeightInfo: WeightInfo; + } + + // The pallet's runtime storage items. + #[pallet::storage] + pub type Something = StorageValue<_, u32>; + + // Errors inform users that something went wrong. + #[pallet::error] + pub enum Error { + /// Error names should be descriptive. + NoneValue, + /// Errors should have helpful documentation associated with them. + StorageOverflow, + } + + // Dispatchable functions allows users to interact with the pallet and invoke state changes. + // These functions materialize as "extrinsics", which are often compared to transactions. + // Dispatchable functions must be annotated with a weight and must return a DispatchResult. + #[pallet::call] + impl Pallet { + /// An example dispatchable that takes a singles value as a parameter, writes the value to + /// storage and emits an event. This function must be dispatched by a signed extrinsic. + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::do_something())] + pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { + // Check that the extrinsic was signed and get the signer. + // This function will return an error if the extrinsic is not signed. + let who = ensure_signed(origin)?; + + // Update storage. + >::put(something); + + // Emit an event. 
+ Self::deposit_event(Event::SomethingStored { something, who }); + // Return a successful DispatchResultWithPostInfo + Ok(()) + } + + /// An example dispatchable that may throw a custom error. + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::cause_error())] + pub fn cause_error(origin: OriginFor) -> DispatchResult { + let _who = ensure_signed(origin)?; + + // Read a value from storage. + match >::get() { + // Return an error if the value has not been set. + None => return Err(Error::::NoneValue.into()), + Some(old) => { + // Increment the value read from storage; will error in the event of overflow. + let new = old.checked_add(1).ok_or(Error::::StorageOverflow)?; + // Update the value in storage with the incremented result. + >::put(new); + Ok(()) + }, + } + } + } +} diff --git a/frame/examples/split/src/mock.rs b/frame/examples/split/src/mock.rs new file mode 100644 index 0000000000000..bee3633ef68f2 --- /dev/null +++ b/frame/examples/split/src/mock.rs @@ -0,0 +1,55 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate as pallet_template; +use frame_support::{derive_impl, sp_runtime::BuildStorage}; +use sp_core::ConstU64; + +type Block = frame_system::mocking::MockBlock; + +// Configure a mock runtime to test the pallet. 
+frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + TemplatePallet: pallet_template, + } +); + +/// Using a default config for [`frame_system`] in tests. See `default-config` example for more +/// details. +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; + type BlockHashCount = ConstU64<10>; + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type PalletInfo = PalletInfo; + type OnSetCode = (); +} + +impl pallet_template::Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); +} + +// Build genesis storage according to the mock runtime. +pub fn new_test_ext() -> sp_io::TestExternalities { + frame_system::GenesisConfig::::default().build_storage().unwrap().into() +} diff --git a/frame/examples/split/src/tests.rs b/frame/examples/split/src/tests.rs new file mode 100644 index 0000000000000..1d4b6dfcff9d5 --- /dev/null +++ b/frame/examples/split/src/tests.rs @@ -0,0 +1,44 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::{mock::*, Error, Event, Something}; +use frame_support::{assert_noop, assert_ok}; + +#[test] +fn it_works_for_default_value() { + new_test_ext().execute_with(|| { + // Go past genesis block so events get deposited + System::set_block_number(1); + // Dispatch a signed extrinsic. + assert_ok!(TemplatePallet::do_something(RuntimeOrigin::signed(1), 42)); + // Read pallet storage and assert an expected result. + assert_eq!(Something::::get(), Some(42)); + // Assert that the correct event was deposited + System::assert_last_event(Event::SomethingStored { something: 42, who: 1 }.into()); + }); +} + +#[test] +fn correct_error_for_none_value() { + new_test_ext().execute_with(|| { + // Ensure the expected error is thrown when no value is present. + assert_noop!( + TemplatePallet::cause_error(RuntimeOrigin::signed(1)), + Error::::NoneValue + ); + }); +} diff --git a/frame/examples/split/src/weights.rs b/frame/examples/split/src/weights.rs new file mode 100644 index 0000000000000..4219ce1e2697c --- /dev/null +++ b/frame/examples/split/src/weights.rs @@ -0,0 +1,91 @@ + +//! Autogenerated weights for pallet_template +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Alexs-MacBook-Pro-2.local`, CPU: `` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// ../../target/release/node-template +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_template +// --extrinsic +// * +// --steps=50 +// --repeat=20 +// --execution=wasm +// --wasm-execution=compiled +// --output +// pallets/template/src/weights.rs +// --template +// ../../.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for pallet_template. +pub trait WeightInfo { + fn do_something() -> Weight; + fn cause_error() -> Weight; +} + +/// Weights for pallet_template using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: TemplatePallet Something (r:0 w:1) + /// Proof: TemplatePallet Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn do_something() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: TemplatePallet Something (r:1 w:1) + /// Proof: TemplatePallet Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn cause_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1489` + // Minimum execution time: 6_000_000 picoseconds. 
+ Weight::from_parts(6_000_000, 1489) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + /// Storage: TemplatePallet Something (r:0 w:1) + /// Proof: TemplatePallet Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn do_something() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: TemplatePallet Something (r:1 w:1) + /// Proof: TemplatePallet Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn cause_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1489` + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(6_000_000, 1489) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} diff --git a/frame/examples/src/lib.rs b/frame/examples/src/lib.rs new file mode 100644 index 0000000000000..d1cd32bb50f26 --- /dev/null +++ b/frame/examples/src/lib.rs @@ -0,0 +1,40 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
# FRAME Pallet Examples +//! +//! This crate contains examples of FRAME pallets. It is not intended to be used in production. +//! +//! ## Pallets +//! +//! - [**`pallet_example_basic`**](./basic): A simple example of a FRAME pallet demonstrating +//! concepts, APIs and structures common to most FRAME runtimes. +//! +//! - [**`pallet_example_offchain_worker`**](./offchain-worker): A simple example of a FRAME pallet +//! demonstrating concepts, APIs and structures common to most offchain workers. +//! +//! - [**`pallet-default-config-example`**](./default-config): A simple example of a FRAME pallet +//! demonstrating the simpler way to implement `Config` trait of pallets. +//! +//! - [**`pallet-dev-mode`**](./dev-mode): A simple example of a FRAME pallet demonstrating the ease +//! of requirements for a pallet in dev mode. +//! +//! - [**`pallet-example-kitchensink`**](./kitchensink): A simple example of a FRAME pallet +//! demonstrating a catalog of the the FRAME macros and their various syntax options. +//! +//! - [**`pallet-example-split`**](./split): A simple example of a FRAME pallet demonstrating the +//! ability to split sections across multiple files. 
diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index ed661b8ac9493..fa466dd2041ea 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -13,27 +13,27 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../try-runtime", optional = true } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-tracing = { version = "6.0.0", default-features = false, path = "../../primitives/tracing" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-tracing = { version = "10.0.0", default-features = false, path = "../../primitives/tracing" } [dev-dependencies] -array-bytes = "4.1" +array-bytes = "6.1" pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-transaction-payment = { version = "4.0.0-dev", path = 
"../transaction-payment" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } -sp-version = { version = "5.0.0", path = "../../primitives/version" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } +sp-version = { version = "22.0.0", path = "../../primitives/version" } [features] default = ["std"] @@ -42,11 +42,23 @@ std = [ "codec/std", "frame-support/std", "frame-system/std", + "frame-try-runtime/std", "scale-info/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", "sp-tracing/std", + "pallet-balances/std", + "pallet-transaction-payment/std", + "sp-inherents/std", + "sp-version/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-try-runtime/try-runtime", + "sp-runtime/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-transaction-payment/try-runtime" ] -try-runtime = ["frame-support/try-runtime", "frame-try-runtime/try-runtime", "sp-runtime/try-runtime"] diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index fd76fefadff4b..4e24717a39e93 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -126,6 +126,7 @@ use frame_support::{ }, weights::Weight, }; +use frame_system::pallet_prelude::BlockNumberFor; use sp_runtime::{ generic::Digest, traits::{ @@ -137,6 +138,9 @@ use sp_runtime::{ }; use sp_std::{marker::PhantomData, prelude::*}; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + #[allow(dead_code)] const LOG_TARGET: &str = "runtime::executive"; @@ -175,14 +179,17 @@ pub struct Executive< impl< System: frame_system::Config + EnsureInherentsAreFirst, - Block: traits::Block
, + Block: traits::Block< + Header = frame_system::pallet_prelude::HeaderFor, + Hash = System::Hash, + >, Context: Default, UnsignedValidator, AllPalletsWithSystem: OnRuntimeUpgrade - + OnInitialize - + OnIdle - + OnFinalize - + OffchainWorker, + + OnInitialize> + + OnIdle> + + OnFinalize> + + OffchainWorker>, COnRuntimeUpgrade: OnRuntimeUpgrade, > ExecuteBlock for Executive @@ -209,15 +216,18 @@ where #[cfg(feature = "try-runtime")] impl< System: frame_system::Config + EnsureInherentsAreFirst, - Block: traits::Block
, + Block: traits::Block< + Header = frame_system::pallet_prelude::HeaderFor, + Hash = System::Hash, + >, Context: Default, UnsignedValidator, AllPalletsWithSystem: OnRuntimeUpgrade - + OnInitialize - + OnIdle - + OnFinalize - + OffchainWorker - + frame_support::traits::TryState, + + OnInitialize> + + OnIdle> + + OnFinalize> + + OffchainWorker> + + frame_support::traits::TryState>, COnRuntimeUpgrade: OnRuntimeUpgrade, > Executive where @@ -294,10 +304,9 @@ where // run the try-state checks of all pallets, ensuring they don't alter any state. let _guard = frame_support::StorageNoopGuard::default(); - >::try_state( - *header.number(), - select, - ) + , + >>::try_state(*header.number(), select) .map_err(|e| { frame_support::log::error!(target: LOG_TARGET, "failure: {:?}", e); e @@ -329,6 +338,12 @@ where ); } + frame_support::log::info!( + target: LOG_TARGET, + "try-runtime: Block #{:?} successfully executed", + header.number(), + ); + Ok(frame_system::Pallet::::block_weight().total()) } @@ -338,10 +353,12 @@ where /// `true`. Also, if set to `true`, it runs the `pre_upgrade` and `post_upgrade` hooks. pub fn try_runtime_upgrade( checks: frame_try_runtime::UpgradeCheckSelect, - ) -> Result { + ) -> Result { if checks.try_state() { let _guard = frame_support::StorageNoopGuard::default(); - >::try_state( + , + >>::try_state( frame_system::Pallet::::block_number(), frame_try_runtime::TryStateSelect::All, )?; @@ -354,7 +371,9 @@ where if checks.try_state() { let _guard = frame_support::StorageNoopGuard::default(); - >::try_state( + , + >>::try_state( frame_system::Pallet::::block_number(), frame_try_runtime::TryStateSelect::All, )?; @@ -366,14 +385,17 @@ where impl< System: frame_system::Config + EnsureInherentsAreFirst, - Block: traits::Block
, + Block: traits::Block< + Header = frame_system::pallet_prelude::HeaderFor, + Hash = System::Hash, + >, Context: Default, UnsignedValidator, AllPalletsWithSystem: OnRuntimeUpgrade - + OnInitialize - + OnIdle - + OnFinalize - + OffchainWorker, + + OnInitialize> + + OnIdle> + + OnFinalize> + + OffchainWorker>, COnRuntimeUpgrade: OnRuntimeUpgrade, > Executive where @@ -390,14 +412,14 @@ where } /// Start the execution of a particular block. - pub fn initialize_block(header: &System::Header) { + pub fn initialize_block(header: &frame_system::pallet_prelude::HeaderFor) { sp_io::init_tracing(); sp_tracing::enter_span!(sp_tracing::Level::TRACE, "init_block"); let digests = Self::extract_pre_digest(header); Self::initialize_block_impl(header.number(), header.parent_hash(), &digests); } - fn extract_pre_digest(header: &System::Header) -> Digest { + fn extract_pre_digest(header: &frame_system::pallet_prelude::HeaderFor) -> Digest { let mut digest = ::default(); header.digest().logs().iter().for_each(|d| { if d.as_pre_runtime().is_some() { @@ -408,7 +430,7 @@ where } fn initialize_block_impl( - block_number: &System::BlockNumber, + block_number: &BlockNumberFor, parent_hash: &System::Hash, digest: &Digest, ) { @@ -423,7 +445,7 @@ where } >::initialize(block_number, parent_hash, digest); weight = weight.saturating_add(, >>::on_initialize(*block_number)); weight = weight.saturating_add( >::get().base_block, @@ -458,8 +480,8 @@ where // Check that `parent_hash` is correct. let n = *header.number(); assert!( - n > System::BlockNumber::zero() && - >::block_hash(n - System::BlockNumber::one()) == + n > BlockNumberFor::::zero() && + >::block_hash(n - BlockNumberFor::::one()) == *header.parent_hash(), "Parent hash should be valid.", ); @@ -509,7 +531,7 @@ where /// Finalize the block - it is up the caller to ensure that all header fields are valid /// except state-root. 
- pub fn finalize_block() -> System::Header { + pub fn finalize_block() -> frame_system::pallet_prelude::HeaderFor { sp_io::init_tracing(); sp_tracing::enter_span!(sp_tracing::Level::TRACE, "finalize_block"); >::note_finished_extrinsics(); @@ -526,7 +548,7 @@ where let remaining_weight = max_weight.saturating_sub(weight.total()); if remaining_weight.all_gt(Weight::zero()) { - let used_weight = >::on_idle( + let used_weight = >>::on_idle( block_number, remaining_weight, ); @@ -536,7 +558,7 @@ where ); } - >::on_finalize(block_number); + >>::on_finalize(block_number); } /// Apply extrinsic outside of the block execution function. @@ -576,7 +598,7 @@ where Ok(r.map(|_| ()).map_err(|e| e.error)) } - fn final_checks(header: &System::Header) { + fn final_checks(header: &frame_system::pallet_prelude::HeaderFor) { sp_tracing::enter_span!(sp_tracing::Level::TRACE, "final_checks"); // remove temporaries let new_header = >::finalize(); @@ -648,7 +670,7 @@ where } /// Start an offchain worker and generate extrinsics. - pub fn offchain_worker(header: &System::Header) { + pub fn offchain_worker(header: &frame_system::pallet_prelude::HeaderFor) { sp_io::init_tracing(); // We need to keep events available for offchain workers, // hence we initialize the block manually. @@ -662,7 +684,7 @@ where // as well. frame_system::BlockHash::::insert(header.number(), header.hash()); - >::offchain_worker( + >>::offchain_worker( *header.number(), ) } @@ -680,7 +702,7 @@ mod tests { transaction_validity::{ InvalidTransaction, TransactionValidityError, UnknownTransaction, ValidTransaction, }, - DispatchError, + BuildStorage, DispatchError, }; use frame_support::{ @@ -709,17 +731,17 @@ mod tests { impl Hooks> for Pallet { // module hooks. 
// one with block number arg and one without - fn on_initialize(n: T::BlockNumber) -> Weight { + fn on_initialize(n: BlockNumberFor) -> Weight { println!("on_initialize({})", n); Weight::from_parts(175, 0) } - fn on_idle(n: T::BlockNumber, remaining_weight: Weight) -> Weight { + fn on_idle(n: BlockNumberFor, remaining_weight: Weight) -> Weight { println!("on_idle{}, {})", n, remaining_weight); Weight::from_parts(175, 0) } - fn on_finalize(n: T::BlockNumber) { + fn on_finalize(n: BlockNumberFor) { println!("on_finalize({})", n); } @@ -728,8 +750,8 @@ mod tests { Weight::from_parts(200, 0) } - fn offchain_worker(n: T::BlockNumber) { - assert_eq!(T::BlockNumber::from(1u32), n); + fn offchain_worker(n: BlockNumberFor) { + assert_eq!(BlockNumberFor::::from(1u32), n); } } @@ -819,12 +841,9 @@ mod tests { } frame_support::construct_runtime!( - pub struct Runtime where - Block = TestBlock, - NodeBlock = TestBlock, - UncheckedExtrinsic = TestUncheckedExtrinsic + pub struct Runtime { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, Custom: custom::{Pallet, Call, ValidateUnsigned, Inherent}, @@ -849,14 +868,13 @@ mod tests { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; - type BlockNumber = u64; type Hash = sp_core::H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = TestBlock; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = RuntimeVersion; @@ -883,7 +901,7 @@ mod tests { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = ConstU32<1>; - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = ConstU32<1>; } @@ 
-920,7 +938,6 @@ mod tests { ); type TestXt = sp_runtime::testing::TestXt; type TestBlock = Block; - type TestUncheckedExtrinsic = TestXt; // Will contain `true` when the custom runtime logic was called. const CUSTOM_ON_RUNTIME_KEY: &[u8] = b":custom:on_runtime"; @@ -963,7 +980,7 @@ mod tests { #[test] fn balance_transfer_dispatch_works() { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 211)] } .assimilate_storage(&mut t) .unwrap(); @@ -991,7 +1008,7 @@ mod tests { } fn new_test_ext(balance_factor: Balance) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } .assimilate_storage(&mut t) .unwrap(); @@ -999,7 +1016,7 @@ mod tests { } fn new_test_ext_v0(balance_factor: Balance) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } .assimilate_storage(&mut t) .unwrap(); diff --git a/frame/fast-unstake/Cargo.toml b/frame/fast-unstake/Cargo.toml index 93fceefa2de51..8b48537843df1 100644 --- a/frame/fast-unstake/Cargo.toml +++ b/frame/fast-unstake/Cargo.toml @@ -12,26 +12,28 @@ description = "FRAME fast unstake pallet" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = 
["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } sp-staking = { default-features = false, path = "../../primitives/staking" } frame-election-provider-support = { default-features = false, path = "../election-provider-support" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } +docify = "0.2.1" + [dev-dependencies] pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } pallet-staking = { path = "../staking" } pallet-balances = { path = "../balances" } pallet-timestamp = { path = "../timestamp" } @@ -54,10 +56,29 @@ std = [ "frame-election-provider-support/std", "frame-benchmarking/std", + "pallet-balances/std", + "pallet-staking/std", + "pallet-timestamp/std", + "sp-core/std", + "sp-tracing/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", 
"sp-staking/runtime-benchmarks", + "frame-election-provider-support/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-staking/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-election-provider-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-staking/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 3e79bf4077d20..39783271e6569 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -15,37 +15,100 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! A pallet that's designed to JUST do the following: +//! > Made with *Substrate*, for *Polkadot*. //! -//! If a nominator is not exposed in any `ErasStakers` (i.e. "has not actively backed any -//! validators in the last `BondingDuration` days"), then they can register themselves in this -//! pallet, unstake faster than having to wait an entire bonding duration. +//! [![github]](https://github.com/paritytech/substrate/frame/fast-unstake) - +//! [![polkadot]](https://polkadot.network) //! -//! Appearing in the exposure of a validator means being exposed equal to that validator from the -//! point of view of the staking system. This usually means earning rewards with the validator, and -//! also being at the risk of slashing with the validator. This is equivalent to the "Active -//! Nominator" role explained in the -//! [February Staking Update](https://polkadot.network/blog/staking-update-february-2022/). +//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white +//! 
[github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github //! -//! This pallet works off the basis of `on_idle`, meaning that it provides no guarantee about when -//! it will succeed, if at all. Moreover, the queue implementation is unordered. In case of -//! congestion, no FIFO ordering is provided. +//! # Fast Unstake Pallet +//! +//! A pallet to allow participants of the staking system (represented by [`Config::Staking`], being +//! [`StakingInterface`]) to unstake quicker, if and only if they meet the condition of not being +//! exposed to any slashes. +//! +//! ## Overview +//! +//! If a nominator is not exposed anywhere in the staking system, checked via +//! [`StakingInterface::is_exposed_in_era`] (i.e. "has not actively backed any validators in the +//! last [`StakingInterface::bonding_duration`] days"), then they can register themselves in this +//! pallet and unstake faster than having to wait an entire bonding duration. +//! +//! *Being exposed with validator* from the point of view of the staking system means earning +//! rewards with the validator, and also being at the risk of slashing with the validator. This is +//! equivalent to the "Active Nominator" role explained in +//! [here](https://polkadot.network/blog/staking-update-february-2022/). //! //! Stakers who are certain about NOT being exposed can register themselves with -//! [`Call::register_fast_unstake`]. This will chill, and fully unbond the staker, and place them in -//! the queue to be checked. +//! [`Pallet::register_fast_unstake`]. This will chill, fully unbond the staker and place them +//! in the queue to be checked. +//! +//! A successful registration implies being fully unbonded and chilled in the staking system. These +//! effects persist even if the fast-unstake registration is retracted (see [`Pallet::deregister`] +//! and further). //! -//! Once queued, but not being actively processed, stakers can withdraw their request via -//! 
[`Call::deregister`]. +//! Once registered as a fast-unstaker, the staker will be queued and checked by the system. This +//! can take a variable number of blocks based on demand, but will almost certainly be "faster" (as +//! the name suggest) than waiting the standard bonding duration. //! -//! Once queued, a staker wishing to unbond can perform no further action in pallet-staking. This is -//! to prevent them from accidentally exposing themselves behind a validator etc. +//! A fast-unstaker is either in [`Queue`] or actively being checked, at which point it lives in +//! [`Head`]. Once in [`Head`], the request cannot be retracted anymore. But, once in [`Queue`], it +//! can, via [`Pallet::deregister`]. +//! +//! A deposit equal to [`Config::Deposit`] is collected for this process, and is returned in case a +//! successful unstake occurs (`Event::Unstaked` signals that). //! //! Once processed, if successful, no additional fee for the checking process is taken, and the //! staker is instantly unbonded. //! -//! If unsuccessful, meaning that the staker was exposed sometime in the last `BondingDuration` eras -//! they will end up being slashed for the amount of wasted work they have inflicted on the chian. +//! If unsuccessful, meaning that the staker was exposed, the aforementioned deposit will be slashed +//! for the amount of wasted work they have inflicted on the chain. +//! +//! All in all, this pallet is meant to provide an easy off-ramp for some stakers. +//! +//! ### Example +//! +//! 1. Fast-unstake with multiple participants in the queue. +#![doc = docify::embed!("src/tests.rs", successful_multi_queue)] +//! +//! 2. Fast unstake failing because a nominator is exposed. +#![doc = docify::embed!("src/tests.rs", exposed_nominator_cannot_unstake)] +//! +//! ## Pallet API +//! +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, +//! including its configuration trait, dispatchables, storage items, events and errors. +//! 
+//! ## Low Level / Implementation Details +//! +//! This pallet works off the basis of `on_idle`, meaning that it provides no guarantee about when +//! it will succeed, if at all. Moreover, the queue implementation is unordered. In case of +//! congestion, no FIFO ordering is provided. +//! +//! A few important considerations can be concluded based on the `on_idle`-based implementation: +//! +//! * It is crucial for the weights of this pallet to be correct. The code inside +//! [`Pallet::on_idle`] MUST be able to measure itself and report the remaining weight correctly +//! after execution. +//! +//! * If the weight measurement is incorrect, it can lead to perpetual overweight (consequently +//! slow) blocks. +//! +//! * The amount of weight that `on_idle` consumes is a direct function of [`ErasToCheckPerBlock`]. +//! +//! * Thus, a correct value of [`ErasToCheckPerBlock`] (which can be set via [`Pallet::control`]) +//! should be chosen, such that a reasonable amount of weight is used `on_idle`. If +//! [`ErasToCheckPerBlock`] is too large, `on_idle` will always conclude that it has not enough +//! weight to proceed, and will early-return. Nonetheless, this should also be *safe* as long as +//! the benchmarking/weights are *accurate*. +//! +//! * See the inline code-comments on `do_on_idle` (private) for more details. +//! +//! * For further safety, in case of any unforeseen errors, the pallet will emit +//! [`Event::InternalError`] and set [`ErasToCheckPerBlock`] back to 0, which essentially means +//! the pallet will halt/disable itself. #![cfg_attr(not(feature = "std"), no_std)] @@ -64,9 +127,15 @@ pub mod migrations; pub mod types; pub mod weights; +// some extra imports for docs to link properly. +#[cfg(doc)] +pub use frame_support::traits::Hooks; +#[cfg(doc)] +pub use sp_staking::StakingInterface; + +/// The logging target of this pallet. pub const LOG_TARGET: &'static str = "runtime::fast-unstake"; -// syntactic sugar for logging. 
#[macro_export] macro_rules! log { ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { @@ -91,15 +160,8 @@ pub mod pallet { use sp_std::{prelude::*, vec::Vec}; pub use weights::WeightInfo; - #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] - #[codec(mel_bound(T: Config))] - #[scale_info(skip_type_params(T))] - pub struct MaxChecking(sp_std::marker::PhantomData); - impl frame_support::traits::Get for MaxChecking { - fn get() -> u32 { - T::Staking::bonding_duration() + 1 - } - } + #[cfg(feature = "try-runtime")] + use sp_runtime::TryRuntimeError; const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); @@ -122,7 +184,7 @@ pub mod pallet { #[pallet::constant] type Deposit: Get>; - /// The origin that can control this pallet. + /// The origin that can control this pallet, in other words invoke [`Pallet::control`]. type ControlOrigin: frame_support::traits::EnsureOrigin; /// Batch size. @@ -133,40 +195,45 @@ pub mod pallet { /// The access to staking functionality. type Staking: StakingInterface, AccountId = Self::AccountId>; + /// Maximum value for `ErasToCheckPerBlock`, checked in [`Pallet::control`]. + /// + /// This should be slightly bigger than the actual value in order to have accurate + /// benchmarks. + type MaxErasToCheckPerBlock: Get; + /// The weight information of this pallet. type WeightInfo: WeightInfo; - /// Maximum value for `ErasToCheckPerBlock`. This should be as close as possible, but more - /// than the actual value, in order to have accurate benchmarks. - type MaxErasToCheckPerBlock: Get; - /// Use only for benchmarking. #[cfg(feature = "runtime-benchmarks")] type MaxBackersPerValidator: Get; } /// The current "head of the queue" being unstaked. + /// + /// The head in itself can be a batch of up to [`Config::BatchSize`] stakers. #[pallet::storage] pub type Head = StorageValue<_, UnstakeRequest, OptionQuery>; /// The map of all accounts wishing to be unstaked. 
/// /// Keeps track of `AccountId` wishing to unstake and it's corresponding deposit. - /// - /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. + // Hasher: Twox safe since `AccountId` is a secure hash. #[pallet::storage] pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, BalanceOf>; /// Number of eras to check per block. /// - /// If set to 0, this pallet does absolutely nothing. + /// If set to 0, this pallet does absolutely nothing. Cannot be set to more than + /// [`Config::MaxErasToCheckPerBlock`]. /// - /// Based on the amount of weight available at `on_idle`, up to this many eras of a single - /// nominator might be checked. + /// Based on the amount of weight available at [`Pallet::on_idle`], up to this many eras are + /// checked. The checking is represented by updating [`UnstakeRequest::checked`], which is + /// stored in [`Head`]. #[pallet::storage] + #[pallet::getter(fn eras_to_check_per_block)] pub type ErasToCheckPerBlock = StorageValue<_, u32, ValueQuery>; - /// The events of this pallet. #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -174,8 +241,6 @@ pub mod pallet { Unstaked { stash: T::AccountId, result: DispatchResult }, /// A staker was slashed for requesting fast-unstake whilst being exposed. Slashed { stash: T::AccountId, amount: BalanceOf }, - /// An internal error happened. Operations will be paused now. - InternalError, /// A batch was partially checked for the given eras, but the process did not finish. BatchChecked { eras: Vec }, /// A batch of a given size was terminated. @@ -183,6 +248,8 @@ pub mod pallet { /// This is always follows by a number of `Unstaked` or `Slashed` events, marking the end /// of the batch. A new batch will be created upon next block. BatchFinished { size: u32 }, + /// An internal error happened. Operations will be paused now. 
+ InternalError, } #[pallet::error] @@ -205,8 +272,8 @@ pub mod pallet { } #[pallet::hooks] - impl Hooks for Pallet { - fn on_idle(_: T::BlockNumber, remaining_weight: Weight) -> Weight { + impl Hooks> for Pallet { + fn on_idle(_: BlockNumberFor, remaining_weight: Weight) -> Weight { if remaining_weight.any_lt(T::DbWeight::get().reads(2)) { return Weight::from_parts(0, 0) } @@ -215,23 +282,19 @@ pub mod pallet { } fn integrity_test() { - sp_std::if_std! { - sp_io::TestExternalities::new_empty().execute_with(|| { - // ensure that the value of `ErasToCheckPerBlock` is less than - // `T::MaxErasToCheckPerBlock`. - assert!( - ErasToCheckPerBlock::::get() <= T::MaxErasToCheckPerBlock::get(), - "the value of `ErasToCheckPerBlock` is greater than `T::MaxErasToCheckPerBlock`", - ); - }); - } + // Ensure that the value of `ErasToCheckPerBlock` is less or equal to + // `T::MaxErasToCheckPerBlock`. + assert!( + ErasToCheckPerBlock::::get() <= T::MaxErasToCheckPerBlock::get(), + "the value of `ErasToCheckPerBlock` is greater than `T::MaxErasToCheckPerBlock`", + ); } #[cfg(feature = "try-runtime")] - fn try_state(_n: T::BlockNumber) -> Result<(), &'static str> { + fn try_state(_n: BlockNumberFor) -> Result<(), TryRuntimeError> { // ensure that the value of `ErasToCheckPerBlock` is less than // `T::MaxErasToCheckPerBlock`. - assert!( + ensure!( ErasToCheckPerBlock::::get() <= T::MaxErasToCheckPerBlock::get(), "the value of `ErasToCheckPerBlock` is greater than `T::MaxErasToCheckPerBlock`", ); @@ -244,8 +307,12 @@ pub mod pallet { impl Pallet { /// Register oneself for fast-unstake. /// - /// The dispatch origin of this call must be signed by the controller account, similar to - /// `staking::unbond`. + /// ## Dispatch Origin + /// + /// The dispatch origin of this call must be *signed* by whoever is permitted to call + /// unbond funds by the staking system. See [`Config::Staking`]. 
+ /// + /// ## Details /// /// The stash associated with the origin must have no ongoing unlocking chunks. If /// successful, this will fully unbond and chill the stash. Then, it will enqueue the stash @@ -260,6 +327,10 @@ pub mod pallet { /// If the check fails, the stash remains chilled and waiting for being unbonded as in with /// the normal staking system, but they lose part of their unbonding chunks due to consuming /// the chain's resources. + /// + /// ## Events + /// + /// Some events from the staking and currency system might be emitted. #[pallet::call_index(0)] #[pallet::weight(::WeightInfo::register_fast_unstake())] pub fn register_fast_unstake(origin: OriginFor) -> DispatchResult { @@ -285,11 +356,22 @@ pub mod pallet { /// Deregister oneself from the fast-unstake. /// + /// ## Dispatch Origin + /// + /// The dispatch origin of this call must be *signed* by whoever is permitted to call + /// unbond funds by the staking system. See [`Config::Staking`]. + /// + /// ## Details + /// /// This is useful if one is registered, they are still waiting, and they change their mind. /// /// Note that the associated stash is still fully unbonded and chilled as a consequence of - /// calling `register_fast_unstake`. This should probably be followed by a call to - /// `Staking::rebond`. + /// calling [`Pallet::register_fast_unstake`]. Therefore, this should probably be followed + /// by a call to `rebond` in the staking system. + /// + /// ## Events + /// + /// Some events from the staking and currency system might be emitted. #[pallet::call_index(1)] #[pallet::weight(::WeightInfo::deregister())] pub fn deregister(origin: OriginFor) -> DispatchResult { @@ -315,7 +397,17 @@ pub mod pallet { /// Control the operation of this pallet. /// - /// Dispatch origin must be signed by the [`Config::ControlOrigin`]. + /// ## Dispatch Origin + /// + /// The dispatch origin of this call must be [`Config::ControlOrigin`]. 
+ /// + /// ## Details + /// + /// Can set the number of eras to check per block, and potentially other admin work. + /// + /// ## Events + /// + /// No events are emitted from this dispatch. #[pallet::call_index(2)] #[pallet::weight(::WeightInfo::control())] pub fn control(origin: OriginFor, eras_to_check: EraIndex) -> DispatchResult { diff --git a/frame/fast-unstake/src/migrations.rs b/frame/fast-unstake/src/migrations.rs index e5ef919298a4f..564388407045e 100644 --- a/frame/fast-unstake/src/migrations.rs +++ b/frame/fast-unstake/src/migrations.rs @@ -25,6 +25,11 @@ pub mod v1 { use sp_staking::EraIndex; use sp_std::prelude::*; + #[cfg(feature = "try-runtime")] + use frame_support::ensure; + #[cfg(feature = "try-runtime")] + use sp_runtime::TryRuntimeError; + pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { @@ -65,14 +70,20 @@ pub mod v1 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - assert_eq!(Pallet::::on_chain_storage_version(), 0); + fn pre_upgrade() -> Result, TryRuntimeError> { + ensure!( + Pallet::::on_chain_storage_version() == 0, + "The onchain storage version must be zero for the migration to execute." + ); Ok(Default::default()) } #[cfg(feature = "try-runtime")] - fn post_upgrade(_: Vec) -> Result<(), &'static str> { - assert_eq!(Pallet::::on_chain_storage_version(), 1); + fn post_upgrade(_: Vec) -> Result<(), TryRuntimeError> { + ensure!( + Pallet::::on_chain_storage_version() == 1, + "The onchain version must be updated after the migration." 
+ ); Ok(()) } } diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index d75c893807990..3a3d9e2d4b133 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -23,13 +23,16 @@ use frame_support::{ traits::{ConstU64, Currency}, weights::constants::WEIGHT_REF_TIME_PER_SECOND, }; -use sp_runtime::traits::{Convert, IdentityLookup}; +use sp_runtime::{ + traits::{Convert, IdentityLookup}, + BuildStorage, +}; use pallet_staking::{Exposure, IndividualExposure, StakerStatus}; use sp_std::prelude::*; pub type AccountId = u128; -pub type AccountIndex = u32; +pub type Nonce = u32; pub type BlockNumber = u64; pub type Balance = u128; pub type T = Runtime; @@ -47,14 +50,13 @@ impl frame_system::Config for Runtime { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = AccountIndex; - type BlockNumber = BlockNumber; + type Nonce = Nonce; type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type Header = sp_runtime::testing::Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); @@ -91,7 +93,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -133,11 +135,10 @@ impl frame_election_provider_support::ElectionProvider for MockElection { } impl pallet_staking::Config for Runtime { - type MaxNominations = ConstU32<16>; type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type CurrencyToVote = (); type RewardRemainder = (); type RuntimeEvent = RuntimeEvent; type Slash = (); @@ -156,8 +157,9 @@ impl pallet_staking::Config for Runtime { type 
GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; - type OnStakerSlash = (); + type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } @@ -195,14 +197,9 @@ impl fast_unstake::Config for Runtime { } type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + frame_support::construct_runtime!( - pub struct Runtime - where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { + pub struct Runtime { System: frame_system, Timestamp: pallet_timestamp, Balances: pallet_balances, @@ -278,7 +275,7 @@ impl ExtBuilder { pub(crate) fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); let mut storage = - frame_system::GenesisConfig::default().build_storage::().unwrap(); + frame_system::GenesisConfig::::default().build_storage().unwrap(); let validators_range = VALIDATOR_PREFIX..VALIDATOR_PREFIX + VALIDATORS_PER_ERA; let nominators_range = diff --git a/frame/fast-unstake/src/tests.rs b/frame/fast-unstake/src/tests.rs index c51c817ec6a74..94ad6a84b85a1 100644 --- a/frame/fast-unstake/src/tests.rs +++ b/frame/fast-unstake/src/tests.rs @@ -19,7 +19,7 @@ use super::*; use crate::{mock::*, types::*, Event}; -use frame_support::{assert_noop, assert_ok, bounded_vec, pallet_prelude::*, traits::Currency}; +use frame_support::{pallet_prelude::*, testing_prelude::*, traits::Currency}; use pallet_staking::{CurrentEra, RewardDestination}; use sp_runtime::traits::BadOrigin; @@ -303,6 +303,7 @@ mod on_idle { }); } + #[docify::export] #[test] fn successful_multi_queue() { ExtBuilder::default().build_and_execute(|| { @@ -356,6 +357,7 @@ mod on_idle { }); } + #[docify::export] #[test] fn 
successful_unstake() { ExtBuilder::default().build_and_execute(|| { @@ -693,6 +695,7 @@ mod on_idle { }); } + #[docify::export] #[test] fn exposed_nominator_cannot_unstake() { ExtBuilder::default().build_and_execute(|| { diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs index 3ec4b3a9b4d6e..15d0a327e917e 100644 --- a/frame/fast-unstake/src/types.rs +++ b/frame/fast-unstake/src/types.rs @@ -17,25 +17,41 @@ //! Types used in the Fast Unstake pallet. -use crate::{Config, MaxChecking}; +use crate::Config; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ traits::Currency, BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; -use sp_staking::EraIndex; +use sp_staking::{EraIndex, StakingInterface}; use sp_std::prelude::*; -pub type BalanceOf = +/// Maximum number of eras that we might check for a single staker. +/// +/// In effect, it is the bonding duration, coming from [`Config::Staking`], plus one. +#[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] +#[codec(mel_bound(T: Config))] +#[scale_info(skip_type_params(T))] +pub struct MaxChecking(sp_std::marker::PhantomData); +impl frame_support::traits::Get for MaxChecking { + fn get() -> u32 { + T::Staking::bonding_duration() + 1 + } +} + +pub(crate) type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// An unstake request. +/// +/// This is stored in [`crate::Head`] storage item and points to the current unstake request that is +/// being processed. #[derive( Encode, Decode, EqNoBound, PartialEqNoBound, Clone, TypeInfo, RuntimeDebugNoBound, MaxEncodedLen, )] #[scale_info(skip_type_params(T))] pub struct UnstakeRequest { - /// This list of stashes being processed in this request, and their corresponding deposit. - pub(crate) stashes: BoundedVec<(T::AccountId, BalanceOf), T::BatchSize>, + /// This list of stashes are being processed in this request, and their corresponding deposit. 
+ pub stashes: BoundedVec<(T::AccountId, BalanceOf), T::BatchSize>, /// The list of eras for which they have been checked. - pub(crate) checked: BoundedVec>, + pub checked: BoundedVec>, } diff --git a/frame/fast-unstake/src/weights.rs b/frame/fast-unstake/src/weights.rs index 27414a8a8cc0d..9c25a409f7409 100644 --- a/frame/fast-unstake/src/weights.rs +++ b/frame/fast-unstake/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_fast_unstake //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_fast_unstake +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_fast_unstake. pub trait WeightInfo { @@ -91,12 +95,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[1, 64]`. fn on_idle_unstake(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1344 + b * (343 ±0)` + // Measured: `1378 + b * (343 ±0)` // Estimated: `7253 + b * (3774 ±0)` - // Minimum execution time: 92_282_000 picoseconds. 
- Weight::from_parts(31_665_344, 7253) - // Standard Error: 35_348 - .saturating_add(Weight::from_parts(57_005_152, 0).saturating_mul(b.into())) + // Minimum execution time: 92_847_000 picoseconds. + Weight::from_parts(42_300_813, 7253) + // Standard Error: 40_514 + .saturating_add(Weight::from_parts(58_412_402, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -121,14 +125,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[1, 64]`. fn on_idle_check(v: u32, b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1512 + b * (48 ±0) + v * (10037 ±0)` + // Measured: `1546 + b * (48 ±0) + v * (10037 ±0)` // Estimated: `7253 + b * (49 ±0) + v * (12513 ±0)` - // Minimum execution time: 1_547_716_000 picoseconds. - Weight::from_parts(1_552_476_000, 7253) - // Standard Error: 13_914_457 - .saturating_add(Weight::from_parts(445_314_876, 0).saturating_mul(v.into())) - // Standard Error: 55_673_329 - .saturating_add(Weight::from_parts(1_749_024_692, 0).saturating_mul(b.into())) + // Minimum execution time: 1_685_784_000 picoseconds. + Weight::from_parts(1_693_370_000, 7253) + // Standard Error: 13_295_842 + .saturating_add(Weight::from_parts(425_349_148, 0).saturating_mul(v.into())) + // Standard Error: 53_198_180 + .saturating_add(Weight::from_parts(1_673_328_444, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -169,8 +173,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1964` // Estimated: `7253` - // Minimum execution time: 124_644_000 picoseconds. - Weight::from_parts(125_793_000, 7253) + // Minimum execution time: 125_512_000 picoseconds. 
+ Weight::from_parts(129_562_000, 7253) .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(9_u64)) } @@ -188,8 +192,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1223` // Estimated: `7253` - // Minimum execution time: 45_037_000 picoseconds. - Weight::from_parts(45_545_000, 7253) + // Minimum execution time: 43_943_000 picoseconds. + Weight::from_parts(45_842_000, 7253) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -199,8 +203,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_228_000 picoseconds. - Weight::from_parts(3_428_000, 0) + // Minimum execution time: 2_677_000 picoseconds. + Weight::from_parts(2_849_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -240,12 +244,12 @@ impl WeightInfo for () { /// The range of component `b` is `[1, 64]`. fn on_idle_unstake(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1344 + b * (343 ±0)` + // Measured: `1378 + b * (343 ±0)` // Estimated: `7253 + b * (3774 ±0)` - // Minimum execution time: 92_282_000 picoseconds. - Weight::from_parts(31_665_344, 7253) - // Standard Error: 35_348 - .saturating_add(Weight::from_parts(57_005_152, 0).saturating_mul(b.into())) + // Minimum execution time: 92_847_000 picoseconds. + Weight::from_parts(42_300_813, 7253) + // Standard Error: 40_514 + .saturating_add(Weight::from_parts(58_412_402, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().reads((7_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -270,14 +274,14 @@ impl WeightInfo for () { /// The range of component `b` is `[1, 64]`. 
fn on_idle_check(v: u32, b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1512 + b * (48 ±0) + v * (10037 ±0)` + // Measured: `1546 + b * (48 ±0) + v * (10037 ±0)` // Estimated: `7253 + b * (49 ±0) + v * (12513 ±0)` - // Minimum execution time: 1_547_716_000 picoseconds. - Weight::from_parts(1_552_476_000, 7253) - // Standard Error: 13_914_457 - .saturating_add(Weight::from_parts(445_314_876, 0).saturating_mul(v.into())) - // Standard Error: 55_673_329 - .saturating_add(Weight::from_parts(1_749_024_692, 0).saturating_mul(b.into())) + // Minimum execution time: 1_685_784_000 picoseconds. + Weight::from_parts(1_693_370_000, 7253) + // Standard Error: 13_295_842 + .saturating_add(Weight::from_parts(425_349_148, 0).saturating_mul(v.into())) + // Standard Error: 53_198_180 + .saturating_add(Weight::from_parts(1_673_328_444, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -318,8 +322,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1964` // Estimated: `7253` - // Minimum execution time: 124_644_000 picoseconds. - Weight::from_parts(125_793_000, 7253) + // Minimum execution time: 125_512_000 picoseconds. + Weight::from_parts(129_562_000, 7253) .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().writes(9_u64)) } @@ -337,8 +341,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1223` // Estimated: `7253` - // Minimum execution time: 45_037_000 picoseconds. - Weight::from_parts(45_545_000, 7253) + // Minimum execution time: 43_943_000 picoseconds. 
+ Weight::from_parts(45_842_000, 7253) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -348,8 +352,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_228_000 picoseconds. - Weight::from_parts(3_428_000, 0) + // Minimum execution time: 2_677_000 picoseconds. + Weight::from_parts(2_849_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/frame/glutton/Cargo.toml b/frame/glutton/Cargo.toml index bee0d3db625fb..afaa7fdcf973f 100644 --- a/frame/glutton/Cargo.toml +++ b/frame/glutton/Cargo.toml @@ -14,16 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] blake2 = { version = "0.10.4", default-features = false } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.14", default-features = false } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = 
{ version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } @@ -41,11 +41,19 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "pallet-balances/runtime-benchmarks" ] diff --git a/frame/glutton/README.md b/frame/glutton/README.md index bcd6c51a6fced..8ad4f79171820 100644 --- a/frame/glutton/README.md +++ b/frame/glutton/README.md @@ -1,6 +1,6 @@ # WARNING -Do not use on value-bearing chains. This pallet is **only** intended for usage on test-chains. +**DO NOT USE ON VALUE-BEARING CHAINS. THIS PALLET IS ONLY INTENDED FOR TESTING USAGE.** # Glutton Pallet diff --git a/frame/glutton/src/benchmarking.rs b/frame/glutton/src/benchmarking.rs index 13576ae2f3d98..587207587456a 100644 --- a/frame/glutton/src/benchmarking.rs +++ b/frame/glutton/src/benchmarking.rs @@ -25,6 +25,7 @@ use super::*; use frame_benchmarking::benchmarks; use frame_support::{pallet_prelude::*, weights::constants::*}; use frame_system::RawOrigin as SystemOrigin; +use sp_runtime::{traits::One, Perbill}; use crate::Pallet as Glutton; use frame_system::Pallet as System; @@ -67,8 +68,8 @@ benchmarks! { // For manual verification only. 
on_idle_high_proof_waste { (0..5000).for_each(|i| TrashData::::insert(i, [i as u8; 1024])); - let _ = Glutton::::set_compute(SystemOrigin::Root.into(), Perbill::from_percent(100)); - let _ = Glutton::::set_storage(SystemOrigin::Root.into(), Perbill::from_percent(100)); + let _ = Glutton::::set_compute(SystemOrigin::Root.into(), One::one()); + let _ = Glutton::::set_storage(SystemOrigin::Root.into(), One::one()); }: { let weight = Glutton::::on_idle(System::::block_number(), Weight::from_parts(WEIGHT_REF_TIME_PER_MILLIS * 100, WEIGHT_PROOF_SIZE_PER_MB * 5)); } @@ -76,8 +77,8 @@ benchmarks! { // For manual verification only. on_idle_low_proof_waste { (0..5000).for_each(|i| TrashData::::insert(i, [i as u8; 1024])); - let _ = Glutton::::set_compute(SystemOrigin::Root.into(), Perbill::from_percent(100)); - let _ = Glutton::::set_storage(SystemOrigin::Root.into(), Perbill::from_percent(100)); + let _ = Glutton::::set_compute(SystemOrigin::Root.into(), One::one()); + let _ = Glutton::::set_storage(SystemOrigin::Root.into(), One::one()); }: { let weight = Glutton::::on_idle(System::::block_number(), Weight::from_parts(WEIGHT_REF_TIME_PER_MILLIS * 100, WEIGHT_PROOF_SIZE_PER_KB * 20)); } @@ -89,10 +90,10 @@ benchmarks! { } set_compute { - }: _(SystemOrigin::Root, Perbill::from_percent(50)) + }: _(SystemOrigin::Root, FixedU64::from_perbill(Perbill::from_percent(50))) set_storage { - }: _(SystemOrigin::Root, Perbill::from_percent(50)) + }: _(SystemOrigin::Root, FixedU64::from_perbill(Perbill::from_percent(50))) impl_benchmark_test_suite!(Glutton, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/frame/glutton/src/lib.rs b/frame/glutton/src/lib.rs index e9a46374a5ade..5950bd676b378 100644 --- a/frame/glutton/src/lib.rs +++ b/frame/glutton/src/lib.rs @@ -15,12 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! # WARNING +//! +//! **DO NOT USE ON VALUE-BEARING CHAINS. 
THIS PALLET IS ONLY INTENDED FOR TESTING USAGE.** +//! //! # Glutton Pallet //! //! Pallet that consumes `ref_time` and `proof_size` of a block. Based on the //! `Compute` and `Storage` parameters the pallet consumes the adequate amount //! of weight. +#![deny(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "runtime-benchmarks")] @@ -32,22 +37,34 @@ mod tests; pub mod weights; use blake2::{Blake2b512, Digest}; -use frame_support::{pallet_prelude::*, weights::WeightMeter}; +use frame_support::{pallet_prelude::*, weights::WeightMeter, DefaultNoBound}; use frame_system::pallet_prelude::*; -use sp_runtime::{traits::Zero, Perbill}; +use sp_io::hashing::twox_256; +use sp_runtime::{traits::Zero, FixedPointNumber, FixedU64}; use sp_std::{vec, vec::Vec}; pub use pallet::*; pub use weights::WeightInfo; +/// The size of each value in the `TrashData` storage in bytes. +pub const VALUE_SIZE: usize = 1024; +/// Max number of entries for the `TrashData` map. +pub const MAX_TRASH_DATA_ENTRIES: u32 = 65_000; +/// Hard limit for any other resource limit (in units). +pub const RESOURCE_HARD_LIMIT: FixedU64 = FixedU64::from_u32(10); + #[frame_support::pallet] pub mod pallet { use super::*; #[pallet::config] pub trait Config: frame_system::Config { + /// The overarching event type. type RuntimeEvent: From + IsType<::RuntimeEvent>; + /// The admin origin that can set computational limits and initialize the pallet. + type AdminOrigin: EnsureOrigin; + /// Weight information for this pallet. type WeightInfo: WeightInfo; } @@ -58,12 +75,21 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// The pallet has been (re)initialized by root. - PalletInitialized { reinit: bool }, - /// The computation limit has been updated by root. - ComputationLimitSet { compute: Perbill }, - /// The storage limit has been updated by root. - StorageLimitSet { storage: Perbill }, + /// The pallet has been (re)initialized. 
+ PalletInitialized { + /// Whether the pallet has been re-initialized. + reinit: bool, + }, + /// The computation limit has been updated. + ComputationLimitSet { + /// The computation limit. + compute: FixedU64, + }, + /// The storage limit has been updated. + StorageLimitSet { + /// The storage limit. + storage: FixedU64, + }, } #[pallet::error] @@ -72,17 +98,24 @@ pub mod pallet { /// /// Set `witness_count` to `Some` to bypass this error. AlreadyInitialized, + + /// The limit was over [`crate::RESOURCE_HARD_LIMIT`]. + InsaneLimit, } - /// Storage value used to specify what percentage of the left over `ref_time` - /// to consume during `on_idle`. + /// The proportion of the remaining `ref_time` to consume during `on_idle`. + /// + /// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to + /// over `1.0` could stall the chain. #[pallet::storage] - pub(crate) type Compute = StorageValue<_, Perbill, ValueQuery>; + pub(crate) type Compute = StorageValue<_, FixedU64, ValueQuery>; - /// Storage value used the specify what percentage of left over `proof_size` - /// to consume during `on_idle`. + /// The proportion of the remaining `proof_size` to consume during `on_idle`. + /// + /// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to + /// over `1.0` could stall the chain. #[pallet::storage] - pub(crate) type Storage = StorageValue<_, Perbill, ValueQuery>; + pub(crate) type Storage = StorageValue<_, FixedU64, ValueQuery>; /// Storage map used for wasting proof size. /// @@ -96,15 +129,51 @@ pub mod pallet { pub(super) type TrashData = StorageMap< Hasher = Twox64Concat, Key = u32, - Value = [u8; 1024], + Value = [u8; VALUE_SIZE], QueryKind = OptionQuery, - MaxValues = ConstU32<65_000>, + MaxValues = ConstU32, >; /// The current number of entries in `TrashData`. 
#[pallet::storage] pub(crate) type TrashDataCount = StorageValue<_, u32, ValueQuery>; + #[pallet::genesis_config] + #[derive(DefaultNoBound)] + pub struct GenesisConfig { + /// The compute limit. + pub compute: FixedU64, + /// The storage limit. + pub storage: FixedU64, + /// The amount of trash data for wasting proof size. + pub trash_data_count: u32, + #[serde(skip)] + /// The required configuration field. + pub _config: sp_std::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + assert!( + self.trash_data_count <= MAX_TRASH_DATA_ENTRIES, + "number of TrashData entries cannot be bigger than {:?}", + MAX_TRASH_DATA_ENTRIES + ); + + (0..self.trash_data_count) + .for_each(|i| TrashData::::insert(i, Pallet::::gen_value(i))); + + TrashDataCount::::set(self.trash_data_count); + + assert!(self.compute <= RESOURCE_HARD_LIMIT, "Compute limit is insane"); + >::put(self.compute); + + assert!(self.storage <= RESOURCE_HARD_LIMIT, "Storage limit is insane"); + >::put(self.storage); + } + } + #[pallet::hooks] impl Hooks> for Pallet { fn integrity_test() { @@ -120,13 +189,14 @@ pub mod pallet { fn on_idle(_: BlockNumberFor, remaining_weight: Weight) -> Weight { let mut meter = WeightMeter::from_limit(remaining_weight); - if !meter.check_accrue(T::WeightInfo::empty_on_idle()) { + if meter.try_consume(T::WeightInfo::empty_on_idle()).is_err() { return T::WeightInfo::empty_on_idle() } - let proof_size_limit = Storage::::get().mul_floor(meter.remaining().proof_size()); + let proof_size_limit = + Storage::::get().saturating_mul_int(meter.remaining().proof_size()); let computation_weight_limit = - Compute::::get().mul_floor(meter.remaining().ref_time()); + Compute::::get().saturating_mul_int(meter.remaining().ref_time()); let mut meter = WeightMeter::from_limit(Weight::from_parts( computation_weight_limit, proof_size_limit, @@ -135,15 +205,18 @@ pub mod pallet { Self::waste_at_most_proof_size(&mut meter); 
Self::waste_at_most_ref_time(&mut meter); - meter.consumed + meter.consumed() } } - #[pallet::call] + #[pallet::call(weight = T::WeightInfo)] impl Pallet { - /// Initializes the pallet by writing into `TrashData`. + /// Initialize the pallet. Should be called once, if no genesis state was provided. + /// + /// `current_count` is the current number of elements in `TrashData`. This can be set to + /// `None` when the pallet is first initialized. /// - /// Only callable by Root. A good default for `trash_count` is `5_000`. + /// Only callable by Root or `AdminOrigin`. A good default for `new_count` is `5_000`. #[pallet::call_index(0)] #[pallet::weight( T::WeightInfo::initialize_pallet_grow(witness_count.unwrap_or_default()) @@ -154,7 +227,7 @@ pub mod pallet { new_count: u32, witness_count: Option, ) -> DispatchResult { - ensure_root(origin)?; + T::AdminOrigin::try_origin(origin).map(|_| ()).or_else(|o| ensure_root(o))?; let current_count = TrashDataCount::::get(); ensure!( @@ -163,7 +236,8 @@ pub mod pallet { ); if new_count > current_count { - (current_count..new_count).for_each(|i| TrashData::::insert(i, [i as u8; 1024])); + (current_count..new_count) + .for_each(|i| TrashData::::insert(i, Self::gen_value(i))); } else { (new_count..current_count).for_each(TrashData::::remove); } @@ -173,28 +247,32 @@ pub mod pallet { Ok(()) } - /// Set the `Compute` storage value that determines how much of the - /// block's weight `ref_time` to use during `on_idle`. + /// Set how much of the remaining `ref_time` weight should be consumed by `on_idle`. /// - /// Only callable by Root. + /// Only callable by Root or `AdminOrigin`. 
#[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::set_compute())] - pub fn set_compute(origin: OriginFor, compute: Perbill) -> DispatchResult { - ensure_root(origin)?; + pub fn set_compute(origin: OriginFor, compute: FixedU64) -> DispatchResult { + T::AdminOrigin::try_origin(origin).map(|_| ()).or_else(|o| ensure_root(o))?; + + ensure!(compute <= RESOURCE_HARD_LIMIT, Error::::InsaneLimit); Compute::::set(compute); Self::deposit_event(Event::ComputationLimitSet { compute }); Ok(()) } - /// Set the `Storage` storage value that determines the PoV size usage - /// for each block. + /// Set how much of the remaining `proof_size` weight should be consumed by `on_idle`. + /// + /// `1.0` means that all remaining `proof_size` will be consumed. The PoV benchmarking + /// results that are used here are likely an over-estimation. 100% intended consumption will + /// therefore translate to less than 100% actual consumption. /// - /// Only callable by Root. + /// Only callable by Root or `AdminOrigin`. #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::set_storage())] - pub fn set_storage(origin: OriginFor, storage: Perbill) -> DispatchResult { - ensure_root(origin)?; + pub fn set_storage(origin: OriginFor, storage: FixedU64) -> DispatchResult { + T::AdminOrigin::try_origin(origin).map(|_| ()).or_else(|o| ensure_root(o))?; + + ensure!(storage <= RESOURCE_HARD_LIMIT, Error::::InsaneLimit); Storage::::set(storage); Self::deposit_event(Event::StorageLimitSet { storage }); @@ -207,11 +285,9 @@ pub mod pallet { /// /// Tries to come as close to the limit as possible. 
pub(crate) fn waste_at_most_proof_size(meter: &mut WeightMeter) { - let Ok(n) = Self::calculate_proof_size_iters(&meter) else { - return; - }; + let Ok(n) = Self::calculate_proof_size_iters(&meter) else { return }; - meter.defensive_saturating_accrue(T::WeightInfo::waste_proof_size_some(n)); + meter.consume(T::WeightInfo::waste_proof_size_some(n)); (0..n).for_each(|i| { TrashData::::get(i); @@ -239,10 +315,8 @@ pub mod pallet { /// /// Tries to come as close to the limit as possible. pub(crate) fn waste_at_most_ref_time(meter: &mut WeightMeter) { - let Ok(n) = Self::calculate_ref_time_iters(&meter) else { - return; - }; - meter.defensive_saturating_accrue(T::WeightInfo::waste_ref_time_iter(n)); + let Ok(n) = Self::calculate_ref_time_iters(&meter) else { return }; + meter.consume(T::WeightInfo::waste_ref_time_iter(n)); let clobber = Self::waste_ref_time_iter(vec![0u8; 64], n); @@ -251,7 +325,7 @@ pub mod pallet { // compiler does not know that (hopefully). debug_assert!(clobber.len() == 64); if clobber.len() == 65 { - TrashData::::insert(0, [clobber[0] as u8; 1024]); + TrashData::::insert(0, [clobber[0] as u8; VALUE_SIZE]); } } @@ -288,5 +362,17 @@ pub mod pallet { Some(i) => Ok(i as u32), } } + + /// Generate a pseudo-random deterministic value from a `seed`. 
+ pub(crate) fn gen_value(seed: u32) -> [u8; VALUE_SIZE] { + let mut ret = [0u8; VALUE_SIZE]; + + for i in 0u32..(VALUE_SIZE as u32 / 32) { + let hash = (seed, i).using_encoded(twox_256); + ret[i as usize * 32..(i + 1) as usize * 32].copy_from_slice(&hash); + } + + ret + } } } diff --git a/frame/glutton/src/mock.rs b/frame/glutton/src/mock.rs index c8be354f48e28..c79ddd53718eb 100644 --- a/frame/glutton/src/mock.rs +++ b/frame/glutton/src/mock.rs @@ -18,23 +18,22 @@ use super::*; use crate as pallet_glutton; -use frame_support::traits::{ConstU32, ConstU64}; +use frame_support::{ + assert_ok, + traits::{ConstU32, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Glutton: pallet_glutton::{Pallet, Event}, } ); @@ -45,14 +44,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -68,13 +66,22 @@ impl frame_system::Config for Test { impl Config for Test { type RuntimeEvent = RuntimeEvent; + type AdminOrigin = frame_system::EnsureRoot; type WeightInfo = (); } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = 
frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext } + +/// Set the `compute` and `storage` limits. +/// +/// `1.0` corresponds to `100%`. +pub fn set_limits(compute: f64, storage: f64) { + assert_ok!(Glutton::set_compute(RuntimeOrigin::root(), FixedU64::from_float(compute))); + assert_ok!(Glutton::set_storage(RuntimeOrigin::root(), FixedU64::from_float(storage))); +} diff --git a/frame/glutton/src/tests.rs b/frame/glutton/src/tests.rs index d75f2da5cb7ee..1897ff63a70fb 100644 --- a/frame/glutton/src/tests.rs +++ b/frame/glutton/src/tests.rs @@ -17,10 +17,13 @@ //! Tests for the glutton pallet. -use super::*; -use mock::{new_test_ext, Glutton, RuntimeOrigin, System, Test}; +use super::{mock::*, *}; use frame_support::{assert_err, assert_noop, assert_ok, weights::constants::*}; +use sp_runtime::{traits::One, Perbill}; + +const CALIBRATION_ERROR: &'static str = + "Weight calibration failed. 
Please re-run the benchmarks on the same hardware."; #[test] fn initialize_pallet_works() { @@ -43,8 +46,8 @@ fn initialize_pallet_works() { Error::::AlreadyInitialized ); - assert_eq!(TrashData::::get(0), Some([0; 1024])); - assert_eq!(TrashData::::get(1), Some([1; 1024])); + assert_eq!(TrashData::::get(0), Some(Pallet::::gen_value(0))); + assert_eq!(TrashData::::get(1), Some(Pallet::::gen_value(1))); assert_eq!(TrashData::::get(2), None); assert_eq!(TrashDataCount::::get(), 2); @@ -86,52 +89,81 @@ fn expand_and_shrink_trash_data_works() { #[test] fn setting_compute_works() { new_test_ext().execute_with(|| { - assert_eq!(Compute::::get(), Perbill::from_percent(0)); + assert_eq!(Compute::::get(), Zero::zero()); - assert_ok!(Glutton::set_compute(RuntimeOrigin::root(), Perbill::from_percent(70))); - assert_eq!(Compute::::get(), Perbill::from_percent(70)); + assert_ok!(Glutton::set_compute(RuntimeOrigin::root(), FixedU64::from_float(0.3))); + assert_eq!(Compute::::get(), FixedU64::from_float(0.3)); System::assert_last_event( - Event::ComputationLimitSet { compute: Perbill::from_percent(70) }.into(), + Event::ComputationLimitSet { compute: FixedU64::from_float(0.3) }.into(), ); assert_noop!( - Glutton::set_compute(RuntimeOrigin::signed(1), Perbill::from_percent(30)), + Glutton::set_compute(RuntimeOrigin::signed(1), FixedU64::from_float(0.5)), DispatchError::BadOrigin ); assert_noop!( - Glutton::set_compute(RuntimeOrigin::none(), Perbill::from_percent(30)), + Glutton::set_compute(RuntimeOrigin::none(), FixedU64::from_float(0.5)), DispatchError::BadOrigin ); }); } +#[test] +fn setting_compute_respects_limit() { + new_test_ext().execute_with(|| { + // < 1000% is fine + assert_ok!(Glutton::set_compute(RuntimeOrigin::root(), FixedU64::from_float(9.99)),); + // == 1000% is fine + assert_ok!(Glutton::set_compute(RuntimeOrigin::root(), FixedU64::from_u32(10)),); + // > 1000% is not + assert_noop!( + Glutton::set_compute(RuntimeOrigin::root(), FixedU64::from_float(10.01)), + 
Error::::InsaneLimit + ); + }); +} + #[test] fn setting_storage_works() { new_test_ext().execute_with(|| { - assert_eq!(Storage::::get(), Perbill::from_percent(0)); + assert!(Storage::::get().is_zero()); - assert_ok!(Glutton::set_storage(RuntimeOrigin::root(), Perbill::from_percent(30))); - assert_eq!(Storage::::get(), Perbill::from_percent(30)); + assert_ok!(Glutton::set_storage(RuntimeOrigin::root(), FixedU64::from_float(0.3))); + assert_eq!(Storage::::get(), FixedU64::from_float(0.3)); System::assert_last_event( - Event::StorageLimitSet { storage: Perbill::from_percent(30) }.into(), + Event::StorageLimitSet { storage: FixedU64::from_float(0.3) }.into(), ); assert_noop!( - Glutton::set_storage(RuntimeOrigin::signed(1), Perbill::from_percent(90)), + Glutton::set_storage(RuntimeOrigin::signed(1), FixedU64::from_float(0.5)), DispatchError::BadOrigin ); assert_noop!( - Glutton::set_storage(RuntimeOrigin::none(), Perbill::from_percent(90)), + Glutton::set_storage(RuntimeOrigin::none(), FixedU64::from_float(0.5)), DispatchError::BadOrigin ); }); } +#[test] +fn setting_storage_respects_limit() { + new_test_ext().execute_with(|| { + // < 1000% is fine + assert_ok!(Glutton::set_storage(RuntimeOrigin::root(), FixedU64::from_float(9.99)),); + // == 1000% is fine + assert_ok!(Glutton::set_storage(RuntimeOrigin::root(), FixedU64::from_u32(10)),); + // > 1000% is not + assert_noop!( + Glutton::set_storage(RuntimeOrigin::root(), FixedU64::from_float(10.01)), + Error::::InsaneLimit + ); + }); +} + #[test] fn on_idle_works() { new_test_ext().execute_with(|| { - assert_ok!(Glutton::set_compute(RuntimeOrigin::root(), Perbill::from_percent(100))); - assert_ok!(Glutton::set_storage(RuntimeOrigin::root(), Perbill::from_percent(100))); + set_limits(One::one(), One::one()); Glutton::on_idle(1, Weight::from_parts(20_000_000, 0)); }); @@ -141,8 +173,7 @@ fn on_idle_works() { #[test] fn on_idle_weight_high_proof_is_close_enough_works() { new_test_ext().execute_with(|| { - 
assert_ok!(Glutton::set_compute(RuntimeOrigin::root(), Perbill::from_percent(100))); - assert_ok!(Glutton::set_storage(RuntimeOrigin::root(), Perbill::from_percent(100))); + set_limits(One::one(), One::one()); let should = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, WEIGHT_PROOF_SIZE_PER_MB * 5); let got = Glutton::on_idle(1, should); @@ -151,15 +182,13 @@ fn on_idle_weight_high_proof_is_close_enough_works() { let ratio = Perbill::from_rational(got.proof_size(), should.proof_size()); assert!( ratio >= Perbill::from_percent(99), - "Too few proof size consumed, was only {:?} of expected", - ratio + "Too few proof size consumed, was only {ratio:?} of expected", ); let ratio = Perbill::from_rational(got.ref_time(), should.ref_time()); assert!( ratio >= Perbill::from_percent(99), - "Too few ref time consumed, was only {:?} of expected", - ratio + "Too few ref time consumed, was only {ratio:?} of expected", ); }); } @@ -167,26 +196,54 @@ fn on_idle_weight_high_proof_is_close_enough_works() { #[test] fn on_idle_weight_low_proof_is_close_enough_works() { new_test_ext().execute_with(|| { - assert_ok!(Glutton::set_compute(RuntimeOrigin::root(), Perbill::from_percent(100))); - assert_ok!(Glutton::set_storage(RuntimeOrigin::root(), Perbill::from_percent(100))); + set_limits(One::one(), One::one()); let should = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, WEIGHT_PROOF_SIZE_PER_KB * 20); let got = Glutton::on_idle(1, should); assert!(got.all_lte(should), "Consumed too much weight"); let ratio = Perbill::from_rational(got.proof_size(), should.proof_size()); - // Just a sanity check here. 
+ // Just a sanity check here for > 0 assert!( ratio >= Perbill::from_percent(50), - "Too few proof size consumed, was only {:?} of expected", - ratio + "Too few proof size consumed, was only {ratio:?} of expected", ); let ratio = Perbill::from_rational(got.ref_time(), should.ref_time()); assert!( ratio >= Perbill::from_percent(99), - "Too few ref time consumed, was only {:?} of expected", - ratio + "Too few ref time consumed, was only {ratio:?} of expected", + ); + }); +} + +#[test] +fn on_idle_weight_over_unity_is_close_enough_works() { + new_test_ext().execute_with(|| { + // Para blocks get ~500ms compute and ~5MB proof size. + let max_block = + Weight::from_parts(500 * WEIGHT_REF_TIME_PER_MILLIS, 5 * WEIGHT_PROOF_SIZE_PER_MB); + // But now we tell it to consume more than that. + set_limits(1.75, 1.5); + let want = Weight::from_parts( + (1.75 * max_block.ref_time() as f64) as u64, + (1.5 * max_block.proof_size() as f64) as u64, + ); + + let consumed = Glutton::on_idle(1, max_block); + assert!(consumed.all_gt(max_block), "Must consume more than the block limit"); + assert!(consumed.all_lte(want), "Consumed more than the requested weight"); + + let ratio = Perbill::from_rational(consumed.proof_size(), want.proof_size()); + assert!( + ratio >= Perbill::from_percent(99), + "Too few proof size consumed, was only {ratio:?} of expected", + ); + + let ratio = Perbill::from_rational(consumed.ref_time(), want.ref_time()); + assert!( + ratio >= Perbill::from_percent(99), + "Too few ref time consumed, was only {ratio:?} of expected", ); }); } @@ -202,7 +259,7 @@ fn waste_at_most_ref_time_weight_close_enough() { // We require it to be under-spend by at most 1%. assert!( meter.consumed_ratio() >= Perbill::from_percent(99), - "Consumed too few: {:?}", + "{CALIBRATION_ERROR}\nConsumed too few: {:?}", meter.consumed_ratio() ); }); @@ -219,8 +276,19 @@ fn waste_at_most_proof_size_weight_close_enough() { // We require it to be under-spend by at most 1%. 
assert!( meter.consumed_ratio() >= Perbill::from_percent(99), - "Consumed too few: {:?}", + "{CALIBRATION_ERROR}\nConsumed too few: {:?}", meter.consumed_ratio() ); }); } + +#[test] +fn gen_value_works() { + let g0 = Pallet::::gen_value(0); + let g1 = Pallet::::gen_value(1); + + assert_eq!(g0.len(), VALUE_SIZE); + assert_ne!(g0, g1, "Is distinct"); + assert_ne!(g0, [0; VALUE_SIZE], "Is not zero"); + assert_eq!(g0, Pallet::::gen_value(0), "Is deterministic"); +} diff --git a/frame/glutton/src/weights.rs b/frame/glutton/src/weights.rs index 82bac91c6d785..cbc0fb022f510 100644 --- a/frame/glutton/src/weights.rs +++ b/frame/glutton/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_glutton //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_glutton +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_glutton. pub trait WeightInfo { @@ -69,12 +73,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1000]`. 
fn initialize_pallet_grow(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `4` + // Measured: `86` // Estimated: `1489` - // Minimum execution time: 10_410_000 picoseconds. - Weight::from_parts(10_515_000, 1489) - // Standard Error: 1_069 - .saturating_add(Weight::from_parts(1_513_013, 0).saturating_mul(n.into())) + // Minimum execution time: 11_488_000 picoseconds. + Weight::from_parts(93_073_710, 1489) + // Standard Error: 22_390 + .saturating_add(Weight::from_parts(9_572_012, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -86,12 +90,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_shrink(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `65` + // Measured: `119` // Estimated: `1489` - // Minimum execution time: 11_105_000 picoseconds. - Weight::from_parts(584_850, 1489) - // Standard Error: 1_417 - .saturating_add(Weight::from_parts(1_054_988, 0).saturating_mul(n.into())) + // Minimum execution time: 11_378_000 picoseconds. + Weight::from_parts(5_591_508, 1489) + // Standard Error: 1_592 + .saturating_add(Weight::from_parts(1_163_758, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -101,83 +105,83 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 709_000 picoseconds. - Weight::from_parts(7_409_096, 0) - // Standard Error: 23 - .saturating_add(Weight::from_parts(95_342, 0).saturating_mul(i.into())) + // Minimum execution time: 669_000 picoseconds. 
+ Weight::from_parts(990_745, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(105_224, 0).saturating_mul(i.into())) } /// Storage: Glutton TrashData (r:5000 w:0) /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) /// The range of component `i` is `[0, 5000]`. fn waste_proof_size_some(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `119036 + i * (1022 ±0)` + // Measured: `119114 + i * (1022 ±0)` // Estimated: `990 + i * (3016 ±0)` - // Minimum execution time: 584_000 picoseconds. - Weight::from_parts(674_000, 990) - // Standard Error: 1_802 - .saturating_add(Weight::from_parts(5_360_522, 0).saturating_mul(i.into())) + // Minimum execution time: 435_000 picoseconds. + Weight::from_parts(66_547_542, 990) + // Standard Error: 4_557 + .saturating_add(Weight::from_parts(5_851_324, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) } /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: Glutton TrashData (r:1737 w:0) /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) fn on_idle_high_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `1900466` + // Measured: `1900497` // Estimated: `5239782` - // Minimum execution time: 57_124_610_000 picoseconds. 
- Weight::from_parts(57_256_059_000, 5239782) + // Minimum execution time: 67_699_845_000 picoseconds. + Weight::from_parts(67_893_204_000, 5239782) .saturating_add(T::DbWeight::get().reads(1739_u64)) } /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: Glutton TrashData (r:5 w:0) /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) fn on_idle_low_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `9516` + // Measured: `9547` // Estimated: `16070` - // Minimum execution time: 101_500_066_000 picoseconds. - Weight::from_parts(101_621_640_000, 16070) + // Minimum execution time: 122_297_527_000 picoseconds. + Weight::from_parts(122_394_818_000, 16070) .saturating_add(T::DbWeight::get().reads(7_u64)) } /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) fn empty_on_idle() -> Weight { // Proof Size summary in bytes: - // Measured: `4` - // Estimated: `1489` - // Minimum execution time: 4_164_000 picoseconds. 
- Weight::from_parts(4_378_000, 1489) + // Measured: `86` + // Estimated: `1493` + // Minimum execution time: 5_882_000 picoseconds. + Weight::from_parts(6_138_000, 1493) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: Glutton Compute (r:0 w:1) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) fn set_compute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_795_000 picoseconds. - Weight::from_parts(9_076_000, 0) + // Minimum execution time: 7_830_000 picoseconds. + Weight::from_parts(8_366_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Glutton Storage (r:0 w:1) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) fn set_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_979_000 picoseconds. - Weight::from_parts(9_195_000, 0) + // Minimum execution time: 7_933_000 picoseconds. + Weight::from_parts(8_213_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -191,12 +195,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_grow(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `4` + // Measured: `86` // Estimated: `1489` - // Minimum execution time: 10_410_000 picoseconds. - Weight::from_parts(10_515_000, 1489) - // Standard Error: 1_069 - .saturating_add(Weight::from_parts(1_513_013, 0).saturating_mul(n.into())) + // Minimum execution time: 11_488_000 picoseconds. 
+ Weight::from_parts(93_073_710, 1489) + // Standard Error: 22_390 + .saturating_add(Weight::from_parts(9_572_012, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -208,12 +212,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_shrink(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `65` + // Measured: `119` // Estimated: `1489` - // Minimum execution time: 11_105_000 picoseconds. - Weight::from_parts(584_850, 1489) - // Standard Error: 1_417 - .saturating_add(Weight::from_parts(1_054_988, 0).saturating_mul(n.into())) + // Minimum execution time: 11_378_000 picoseconds. + Weight::from_parts(5_591_508, 1489) + // Standard Error: 1_592 + .saturating_add(Weight::from_parts(1_163_758, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -223,83 +227,83 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 709_000 picoseconds. - Weight::from_parts(7_409_096, 0) - // Standard Error: 23 - .saturating_add(Weight::from_parts(95_342, 0).saturating_mul(i.into())) + // Minimum execution time: 669_000 picoseconds. + Weight::from_parts(990_745, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(105_224, 0).saturating_mul(i.into())) } /// Storage: Glutton TrashData (r:5000 w:0) /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) /// The range of component `i` is `[0, 5000]`. 
fn waste_proof_size_some(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `119036 + i * (1022 ±0)` + // Measured: `119114 + i * (1022 ±0)` // Estimated: `990 + i * (3016 ±0)` - // Minimum execution time: 584_000 picoseconds. - Weight::from_parts(674_000, 990) - // Standard Error: 1_802 - .saturating_add(Weight::from_parts(5_360_522, 0).saturating_mul(i.into())) + // Minimum execution time: 435_000 picoseconds. + Weight::from_parts(66_547_542, 990) + // Standard Error: 4_557 + .saturating_add(Weight::from_parts(5_851_324, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) } /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: Glutton TrashData (r:1737 w:0) /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) fn on_idle_high_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `1900466` + // Measured: `1900497` // Estimated: `5239782` - // Minimum execution time: 57_124_610_000 picoseconds. - Weight::from_parts(57_256_059_000, 5239782) + // Minimum execution time: 67_699_845_000 picoseconds. 
+ Weight::from_parts(67_893_204_000, 5239782) .saturating_add(RocksDbWeight::get().reads(1739_u64)) } /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: Glutton TrashData (r:5 w:0) /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) fn on_idle_low_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `9516` + // Measured: `9547` // Estimated: `16070` - // Minimum execution time: 101_500_066_000 picoseconds. - Weight::from_parts(101_621_640_000, 16070) + // Minimum execution time: 122_297_527_000 picoseconds. + Weight::from_parts(122_394_818_000, 16070) .saturating_add(RocksDbWeight::get().reads(7_u64)) } /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) fn empty_on_idle() -> Weight { // Proof Size summary in bytes: - // Measured: `4` - // Estimated: `1489` - // Minimum execution time: 4_164_000 picoseconds. - Weight::from_parts(4_378_000, 1489) + // Measured: `86` + // Estimated: `1493` + // Minimum execution time: 5_882_000 picoseconds. 
+ Weight::from_parts(6_138_000, 1493) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: Glutton Compute (r:0 w:1) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) fn set_compute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_795_000 picoseconds. - Weight::from_parts(9_076_000, 0) + // Minimum execution time: 7_830_000 picoseconds. + Weight::from_parts(8_366_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Glutton Storage (r:0 w:1) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) fn set_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_979_000 picoseconds. - Weight::from_parts(9_195_000, 0) + // Minimum execution time: 7_933_000 picoseconds. 
+ Weight::from_parts(8_213_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 288d63512588b..1f0d86cc49cb7 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -13,22 +13,22 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-authorship = { version = "4.0.0-dev", default-features = false, path = "../authorship" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../session" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-consensus-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../primitives/consensus/grandpa" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } +sp-consensus-grandpa = { version = "4.0.0-dev", 
default-features = false, path = "../../primitives/consensus/grandpa", features = ["serde"] } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core", features = ["serde"] } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime", features = ["serde"] } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } -sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking", features = ["serde"] } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] grandpa = { package = "finality-grandpa", version = "0.16.2", features = ["derive-codec"] } @@ -39,7 +39,7 @@ pallet-offences = { version = "4.0.0-dev", path = "../offences" } pallet-staking = { version = "4.0.0-dev", path = "../staking" } pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } -sp-keyring = { version = "7.0.0", path = "../../primitives/keyring" } +sp-keyring = { version = "24.0.0", path = "../../primitives/keyring" } [features] default = ["std"] @@ -60,6 +60,33 @@ std = [ "sp-session/std", "sp-staking/std", "sp-std/std", + "frame-election-provider-support/std", + "pallet-balances/std", + "pallet-offences/std", + "pallet-staking/std", + "pallet-timestamp/std" +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-election-provider-support/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + 
"pallet-offences/runtime-benchmarks", + "pallet-staking/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-election-provider-support/try-runtime", + "frame-system/try-runtime", + "pallet-authorship/try-runtime", + "pallet-balances/try-runtime", + "pallet-offences/try-runtime", + "pallet-session/try-runtime", + "pallet-staking/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime" ] -runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/grandpa/src/default_weights.rs b/frame/grandpa/src/default_weights.rs index 3afd714f47e57..5ccf3794880eb 100644 --- a/frame/grandpa/src/default_weights.rs +++ b/frame/grandpa/src/default_weights.rs @@ -24,15 +24,11 @@ use frame_support::weights::{ }; impl crate::WeightInfo for () { - fn report_equivocation(validator_count: u32) -> Weight { + fn report_equivocation(validator_count: u32, max_nominators_per_validator: u32) -> Weight { // we take the validator set count from the membership proof to // calculate the weight but we set a floor of 100 validators. 
let validator_count = validator_count.max(100) as u64; - // worst case we are considering is that the given offender - // is backed by 200 nominators - const MAX_NOMINATORS: u64 = 200; - // checking membership proof Weight::from_parts(35u64 * WEIGHT_REF_TIME_PER_MICROS, 0) .saturating_add( @@ -45,11 +41,11 @@ impl crate::WeightInfo for () { // report offence .saturating_add(Weight::from_parts(110u64 * WEIGHT_REF_TIME_PER_MICROS, 0)) .saturating_add(Weight::from_parts( - 25u64 * WEIGHT_REF_TIME_PER_MICROS * MAX_NOMINATORS, + 25u64 * WEIGHT_REF_TIME_PER_MICROS * max_nominators_per_validator as u64, 0, )) - .saturating_add(DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) - .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)) + .saturating_add(DbWeight::get().reads(14 + 3 * max_nominators_per_validator as u64)) + .saturating_add(DbWeight::get().writes(10 + 3 * max_nominators_per_validator as u64)) // fetching set id -> session index mappings .saturating_add(DbWeight::get().reads(2)) } diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index 44d0266375230..16727f79a58d5 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -37,6 +37,7 @@ use codec::{self as codec, Decode, Encode}; use frame_support::traits::{Get, KeyOwnerProofSystem}; +use frame_system::pallet_prelude::BlockNumberFor; use log::{error, info}; use sp_consensus_grandpa::{AuthorityId, EquivocationProof, RoundNumber, SetId, KEY_TYPE}; use sp_runtime::{ @@ -109,7 +110,7 @@ impl Offence for EquivocationOffence { /// /// This type implements `OffenceReportSystem` such that: /// - Equivocation reports are published on-chain as unsigned extrinsic via -/// `offchain::SendTransactioinsTypes`. +/// `offchain::SendTransactionTypes`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. 
/// - Offence reporter for unsigned transactions is fetched via the the authorship pallet. @@ -118,7 +119,7 @@ pub struct EquivocationReportSystem(sp_std::marker::PhantomData<(T, impl OffenceReportSystem< Option, - (EquivocationProof, T::KeyOwnerProof), + (EquivocationProof>, T::KeyOwnerProof), > for EquivocationReportSystem where T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, @@ -134,7 +135,7 @@ where type Longevity = L; fn publish_evidence( - evidence: (EquivocationProof, T::KeyOwnerProof), + evidence: (EquivocationProof>, T::KeyOwnerProof), ) -> Result<(), ()> { use frame_system::offchain::SubmitTransaction; let (equivocation_proof, key_owner_proof) = evidence; @@ -152,7 +153,7 @@ where } fn check_evidence( - evidence: (EquivocationProof, T::KeyOwnerProof), + evidence: (EquivocationProof>, T::KeyOwnerProof), ) -> Result<(), TransactionValidityError> { let (equivocation_proof, key_owner_proof) = evidence; @@ -172,7 +173,7 @@ where fn process_evidence( reporter: Option, - evidence: (EquivocationProof, T::KeyOwnerProof), + evidence: (EquivocationProof>, T::KeyOwnerProof), ) -> Result<(), DispatchError> { let (equivocation_proof, key_owner_proof) = evidence; let reporter = reporter.or_else(|| >::author()); diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 8311131e97347..2a0e707ac4148 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -42,6 +42,7 @@ use frame_support::{ weights::Weight, WeakBoundedVec, }; +use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; use sp_consensus_grandpa::{ ConsensusLog, EquivocationProof, ScheduledChange, SetId, GRANDPA_AUTHORITIES_KEY, @@ -94,6 +95,10 @@ pub mod pallet { #[pallet::constant] type MaxAuthorities: Get; + /// The maximum number of nominators for each validator. + #[pallet::constant] + type MaxNominators: Get; + /// The maximum number of entries to keep in the set id to session index mapping. 
/// /// Since the `SetIdSession` map is only used for validating equivocations this @@ -113,13 +118,13 @@ pub mod pallet { /// (from an offchain context). type EquivocationReportSystem: OffenceReportSystem< Option, - (EquivocationProof, Self::KeyOwnerProof), + (EquivocationProof>, Self::KeyOwnerProof), >; } #[pallet::hooks] impl Hooks> for Pallet { - fn on_finalize(block_number: T::BlockNumber) { + fn on_finalize(block_number: BlockNumberFor) { // check for scheduled pending authority set changes if let Some(pending_change) = >::get() { // emit signal if we're at the block that scheduled the change @@ -188,10 +193,13 @@ pub mod pallet { /// against the extracted offender. If both are valid, the offence /// will be reported. #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::report_equivocation(key_owner_proof.validator_count()))] + #[pallet::weight(T::WeightInfo::report_equivocation( + key_owner_proof.validator_count(), + T::MaxNominators::get(), + ))] pub fn report_equivocation( origin: OriginFor, - equivocation_proof: Box>, + equivocation_proof: Box>>, key_owner_proof: T::KeyOwnerProof, ) -> DispatchResultWithPostInfo { let reporter = ensure_signed(origin)?; @@ -214,10 +222,13 @@ pub mod pallet { /// if the block author is defined it will be defined as the equivocation /// reporter. 
#[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::report_equivocation(key_owner_proof.validator_count()))] + #[pallet::weight(T::WeightInfo::report_equivocation( + key_owner_proof.validator_count(), + T::MaxNominators::get(), + ))] pub fn report_equivocation_unsigned( origin: OriginFor, - equivocation_proof: Box>, + equivocation_proof: Box>>, key_owner_proof: T::KeyOwnerProof, ) -> DispatchResultWithPostInfo { ensure_none(origin)?; @@ -245,8 +256,8 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::note_stalled())] pub fn note_stalled( origin: OriginFor, - delay: T::BlockNumber, - best_finalized_block_number: T::BlockNumber, + delay: BlockNumberFor, + best_finalized_block_number: BlockNumberFor, ) -> DispatchResult { ensure_root(origin)?; @@ -287,7 +298,7 @@ pub mod pallet { } #[pallet::type_value] - pub(super) fn DefaultForState() -> StoredState { + pub(super) fn DefaultForState() -> StoredState> { StoredState::Live } @@ -295,23 +306,23 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn state)] pub(super) type State = - StorageValue<_, StoredState, ValueQuery, DefaultForState>; + StorageValue<_, StoredState>, ValueQuery, DefaultForState>; /// Pending change: (signaled at, scheduled change). #[pallet::storage] #[pallet::getter(fn pending_change)] pub(super) type PendingChange = - StorageValue<_, StoredPendingChange>; + StorageValue<_, StoredPendingChange, T::MaxAuthorities>>; /// next block number where we can force a change. #[pallet::storage] #[pallet::getter(fn next_forced)] - pub(super) type NextForced = StorageValue<_, T::BlockNumber>; + pub(super) type NextForced = StorageValue<_, BlockNumberFor>; /// `true` if we are currently stalled. 
#[pallet::storage] #[pallet::getter(fn stalled)] - pub(super) type Stalled = StorageValue<_, (T::BlockNumber, T::BlockNumber)>; + pub(super) type Stalled = StorageValue<_, (BlockNumberFor, BlockNumberFor)>; /// The number of changes (both in terms of keys and underlying economic responsibilities) /// in the "set" of Grandpa validators from genesis. @@ -333,14 +344,16 @@ pub mod pallet { #[pallet::getter(fn session_for_set)] pub(super) type SetIdSession = StorageMap<_, Twox64Concat, SetId, SessionIndex>; - #[derive(Default)] + #[derive(frame_support::DefaultNoBound)] #[pallet::genesis_config] - pub struct GenesisConfig { + pub struct GenesisConfig { pub authorities: AuthorityList, + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { CurrentSetId::::put(SetId::default()); Pallet::::initialize(&self.authorities) @@ -362,7 +375,7 @@ pub mod pallet { } pub trait WeightInfo { - fn report_equivocation(validator_count: u32) -> Weight; + fn report_equivocation(validator_count: u32, max_nominators_per_validator: u32) -> Weight; fn note_stalled() -> Weight; } @@ -427,7 +440,7 @@ impl Pallet { /// Schedule GRANDPA to pause starting in the given number of blocks. /// Cannot be done when already paused. - pub fn schedule_pause(in_blocks: T::BlockNumber) -> DispatchResult { + pub fn schedule_pause(in_blocks: BlockNumberFor) -> DispatchResult { if let StoredState::Live = >::get() { let scheduled_at = >::block_number(); >::put(StoredState::PendingPause { delay: in_blocks, scheduled_at }); @@ -439,7 +452,7 @@ impl Pallet { } /// Schedule a resume of GRANDPA after pausing. 
- pub fn schedule_resume(in_blocks: T::BlockNumber) -> DispatchResult { + pub fn schedule_resume(in_blocks: BlockNumberFor) -> DispatchResult { if let StoredState::Paused = >::get() { let scheduled_at = >::block_number(); >::put(StoredState::PendingResume { delay: in_blocks, scheduled_at }); @@ -466,8 +479,8 @@ impl Pallet { /// an error if a change is already pending. pub fn schedule_change( next_authorities: AuthorityList, - in_blocks: T::BlockNumber, - forced: Option, + in_blocks: BlockNumberFor, + forced: Option>, ) -> DispatchResult { if !>::exists() { let scheduled_at = >::block_number(); @@ -504,7 +517,7 @@ impl Pallet { } /// Deposit one of this module's logs. - fn deposit_log(log: ConsensusLog) { + fn deposit_log(log: ConsensusLog>) { let log = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()); >::deposit_log(log); } @@ -528,13 +541,13 @@ impl Pallet { /// will push the transaction to the pool. Only useful in an offchain /// context. pub fn submit_unsigned_equivocation_report( - equivocation_proof: EquivocationProof, + equivocation_proof: EquivocationProof>, key_owner_proof: T::KeyOwnerProof, ) -> Option<()> { T::EquivocationReportSystem::publish_evidence((equivocation_proof, key_owner_proof)).ok() } - fn on_stalled(further_wait: T::BlockNumber, median: T::BlockNumber) { + fn on_stalled(further_wait: BlockNumberFor, median: BlockNumberFor) { // when we record old authority sets we could try to figure out _who_ // failed. until then, we can't meaningfully guard against // `next == last` the way that normal session changes do. 
diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index ffc566ffe74de..fd4d737dc493f 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -22,12 +22,13 @@ use crate::{self as pallet_grandpa, AuthorityId, AuthorityList, Config, ConsensusLog}; use ::grandpa as finality_grandpa; use codec::Encode; -use frame_election_provider_support::{onchain, SequentialPhragmen}; +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, SequentialPhragmen, +}; use frame_support::{ parameter_types, - traits::{ - ConstU128, ConstU32, ConstU64, GenesisBuild, KeyOwnerProofSystem, OnFinalize, OnInitialize, - }, + traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnFinalize, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; use sp_consensus_grandpa::{RoundNumber, SetId, GRANDPA_ENGINE_ID}; @@ -36,20 +37,16 @@ use sp_keyring::Ed25519Keyring; use sp_runtime::{ curve::PiecewiseLinear, impl_opaque_keys, - testing::{Header, TestXt, UintAuthorityId}, + testing::{TestXt, UintAuthorityId}, traits::{IdentityLookup, OpaqueKeys}, - DigestItem, Perbill, + BuildStorage, DigestItem, Perbill, }; use sp_staking::{EraIndex, SessionIndex}; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, Authorship: pallet_authorship, @@ -75,14 +72,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; 
type BlockHashCount = ConstU64<250>; type Version = (); @@ -144,7 +140,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -171,6 +167,7 @@ parameter_types! { pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); + pub static ElectionsBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } pub struct OnChainSeqPhragmen; @@ -180,14 +177,12 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = (); type MaxWinners = ConstU32<100>; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; + type Bounds = ElectionsBoundsOnChain; } impl pallet_staking::Config for Test { - type MaxNominations = ConstU32<16>; type RewardRemainder = (); - type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type CurrencyToVote = (); type RuntimeEvent = RuntimeEvent; type Currency = Balances; type CurrencyBalance = ::Balance; @@ -207,9 +202,10 @@ impl pallet_staking::Config for Test { type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; type HistoryDepth = ConstU32<84>; - type OnStakerSlash = (); + type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } @@ -230,6 +226,7 @@ impl Config for Test { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type MaxAuthorities = ConstU32<100>; + type MaxNominators = ConstU32<1000>; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; type KeyOwnerProof = >::Proof; type 
EquivocationReportSystem = @@ -257,7 +254,7 @@ pub fn new_test_ext(vec: Vec<(u64, u64)>) -> sp_io::TestExternalities { } pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 16d89307bb71f..59d73ee729ee8 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -838,7 +838,7 @@ fn report_equivocation_has_valid_weight() { // the weight depends on the size of the validator set, // but there's a lower bound of 100 validators. assert!((1..=100) - .map(::WeightInfo::report_equivocation) + .map(|validators| ::WeightInfo::report_equivocation(validators, 1000)) .collect::>() .windows(2) .all(|w| w[0] == w[1])); @@ -846,7 +846,7 @@ fn report_equivocation_has_valid_weight() { // after 100 validators the weight should keep increasing // with every extra validator. 
assert!((100..=1000) - .map(::WeightInfo::report_equivocation) + .map(|validators| ::WeightInfo::report_equivocation(validators, 1000)) .collect::>() .windows(2) .all(|w| w[0].ref_time() < w[1].ref_time())); diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 2479b0609a0ab..942fba9053294 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -13,19 +13,19 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } enumflags2 = { version = "0.7.7" } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } [features] default = ["std"] @@ -38,10 +38,19 @@ std = [ "sp-io/std", 
"sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "sp-core/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index ba9749172e5f6..1532980574c2a 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -29,20 +29,16 @@ use frame_support::{ use frame_system::{EnsureRoot, EnsureSignedBy}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Identity: pallet_identity::{Pallet, Call, Storage, Event}, } @@ -53,14 +49,13 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); @@ -87,7 +82,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = 
(); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -118,7 +113,7 @@ impl pallet_identity::Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (10, 100), (20, 100), (30, 100)], } diff --git a/frame/identity/src/weights.rs b/frame/identity/src/weights.rs index faefd00fb961f..02fcd7db3c953 100644 --- a/frame/identity/src/weights.rs +++ b/frame/identity/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_identity //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_identity +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_identity. 
pub trait WeightInfo { @@ -76,10 +80,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `32 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 12_851_000 picoseconds. - Weight::from_parts(13_448_645, 2626) - // Standard Error: 1_636 - .saturating_add(Weight::from_parts(113_654, 0).saturating_mul(r.into())) + // Minimum execution time: 11_683_000 picoseconds. + Weight::from_parts(12_515_830, 2626) + // Standard Error: 2_154 + .saturating_add(Weight::from_parts(147_919, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -91,12 +95,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `442 + r * (5 ±0)` // Estimated: `11003` - // Minimum execution time: 33_342_000 picoseconds. - Weight::from_parts(33_155_116, 11003) - // Standard Error: 2_307 - .saturating_add(Weight::from_parts(56_409, 0).saturating_mul(r.into())) - // Standard Error: 450 - .saturating_add(Weight::from_parts(437_684, 0).saturating_mul(x.into())) + // Minimum execution time: 32_949_000 picoseconds. + Weight::from_parts(31_329_634, 11003) + // Standard Error: 4_496 + .saturating_add(Weight::from_parts(203_570, 0).saturating_mul(r.into())) + // Standard Error: 877 + .saturating_add(Weight::from_parts(429_346, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -111,10 +115,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `101` // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 10_315_000 picoseconds. - Weight::from_parts(26_535_526, 11003) - // Standard Error: 4_344 - .saturating_add(Weight::from_parts(3_016_873, 0).saturating_mul(s.into())) + // Minimum execution time: 9_157_000 picoseconds. 
+ Weight::from_parts(24_917_444, 11003) + // Standard Error: 4_554 + .saturating_add(Weight::from_parts(3_279_868, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -132,10 +136,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `194 + p * (32 ±0)` // Estimated: `11003` - // Minimum execution time: 10_220_000 picoseconds. - Weight::from_parts(25_050_056, 11003) - // Standard Error: 3_621 - .saturating_add(Weight::from_parts(1_291_143, 0).saturating_mul(p.into())) + // Minimum execution time: 9_240_000 picoseconds. + Weight::from_parts(23_326_035, 11003) + // Standard Error: 3_664 + .saturating_add(Weight::from_parts(1_439_873, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) @@ -149,16 +153,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. /// The range of component `x` is `[0, 100]`. - fn clear_identity(_r: u32, s: u32, x: u32, ) -> Weight { + fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 56_018_000 picoseconds. - Weight::from_parts(37_757_186, 11003) - // Standard Error: 1_852 - .saturating_add(Weight::from_parts(1_257_980, 0).saturating_mul(s.into())) - // Standard Error: 1_852 - .saturating_add(Weight::from_parts(215_426, 0).saturating_mul(x.into())) + // Minimum execution time: 55_687_000 picoseconds. 
+ Weight::from_parts(30_695_182, 11003) + // Standard Error: 9_921 + .saturating_add(Weight::from_parts(162_357, 0).saturating_mul(r.into())) + // Standard Error: 1_937 + .saturating_add(Weight::from_parts(1_427_998, 0).saturating_mul(s.into())) + // Standard Error: 1_937 + .saturating_add(Weight::from_parts(247_578, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -173,12 +179,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `367 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 34_792_000 picoseconds. - Weight::from_parts(35_368_327, 11003) - // Standard Error: 3_476 - .saturating_add(Weight::from_parts(78_981, 0).saturating_mul(r.into())) - // Standard Error: 678 - .saturating_add(Weight::from_parts(459_077, 0).saturating_mul(x.into())) + // Minimum execution time: 34_876_000 picoseconds. + Weight::from_parts(32_207_018, 11003) + // Standard Error: 5_247 + .saturating_add(Weight::from_parts(249_156, 0).saturating_mul(r.into())) + // Standard Error: 1_023 + .saturating_add(Weight::from_parts(458_329, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -186,14 +192,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[0, 100]`. - fn cancel_request(_r: u32, x: u32, ) -> Weight { + fn cancel_request(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `398 + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 31_306_000 picoseconds. 
- Weight::from_parts(33_304_799, 11003) - // Standard Error: 892 - .saturating_add(Weight::from_parts(451_655, 0).saturating_mul(x.into())) + // Minimum execution time: 30_689_000 picoseconds. + Weight::from_parts(31_967_170, 11003) + // Standard Error: 5_387 + .saturating_add(Weight::from_parts(42_676, 0).saturating_mul(r.into())) + // Standard Error: 1_051 + .saturating_add(Weight::from_parts(446_213, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -204,10 +212,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 8_215_000 picoseconds. - Weight::from_parts(8_692_102, 2626) - // Standard Error: 1_455 - .saturating_add(Weight::from_parts(110_912, 0).saturating_mul(r.into())) + // Minimum execution time: 7_357_000 picoseconds. + Weight::from_parts(7_932_950, 2626) + // Standard Error: 1_804 + .saturating_add(Weight::from_parts(132_653, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -218,10 +226,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 8_397_000 picoseconds. - Weight::from_parts(8_787_656, 2626) - // Standard Error: 1_440 - .saturating_add(Weight::from_parts(111_212, 0).saturating_mul(r.into())) + // Minimum execution time: 7_437_000 picoseconds. + Weight::from_parts(8_051_889, 2626) + // Standard Error: 1_997 + .saturating_add(Weight::from_parts(129_592, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -232,10 +240,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 8_466_000 picoseconds. 
- Weight::from_parts(8_689_763, 2626) - // Standard Error: 1_536 - .saturating_add(Weight::from_parts(106_371, 0).saturating_mul(r.into())) + // Minimum execution time: 7_385_000 picoseconds. + Weight::from_parts(7_911_589, 2626) + // Standard Error: 1_791 + .saturating_add(Weight::from_parts(125_788, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -249,12 +257,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `445 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 25_132_000 picoseconds. - Weight::from_parts(20_582_313, 11003) - // Standard Error: 10_427 - .saturating_add(Weight::from_parts(277_545, 0).saturating_mul(r.into())) - // Standard Error: 1_929 - .saturating_add(Weight::from_parts(747_966, 0).saturating_mul(x.into())) + // Minimum execution time: 24_073_000 picoseconds. + Weight::from_parts(17_817_684, 11003) + // Standard Error: 8_612 + .saturating_add(Weight::from_parts(406_251, 0).saturating_mul(r.into())) + // Standard Error: 1_593 + .saturating_add(Weight::from_parts(755_225, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -273,14 +281,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 72_043_000 picoseconds. - Weight::from_parts(50_994_735, 11003) - // Standard Error: 9_304 - .saturating_add(Weight::from_parts(123_052, 0).saturating_mul(r.into())) - // Standard Error: 1_817 - .saturating_add(Weight::from_parts(1_256_713, 0).saturating_mul(s.into())) - // Standard Error: 1_817 - .saturating_add(Weight::from_parts(219_242, 0).saturating_mul(x.into())) + // Minimum execution time: 73_981_000 picoseconds. 
+ Weight::from_parts(51_684_057, 11003) + // Standard Error: 12_662 + .saturating_add(Weight::from_parts(145_285, 0).saturating_mul(r.into())) + // Standard Error: 2_472 + .saturating_add(Weight::from_parts(1_421_039, 0).saturating_mul(s.into())) + // Standard Error: 2_472 + .saturating_add(Weight::from_parts(240_907, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -296,10 +304,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `475 + s * (36 ±0)` // Estimated: `11003` - // Minimum execution time: 30_747_000 picoseconds. - Weight::from_parts(35_975_985, 11003) - // Standard Error: 1_625 - .saturating_add(Weight::from_parts(73_263, 0).saturating_mul(s.into())) + // Minimum execution time: 29_367_000 picoseconds. + Weight::from_parts(34_214_998, 11003) + // Standard Error: 1_522 + .saturating_add(Weight::from_parts(114_551, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -312,10 +320,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `591 + s * (3 ±0)` // Estimated: `11003` - // Minimum execution time: 13_586_000 picoseconds. - Weight::from_parts(15_909_245, 11003) - // Standard Error: 611 - .saturating_add(Weight::from_parts(16_949, 0).saturating_mul(s.into())) + // Minimum execution time: 12_384_000 picoseconds. + Weight::from_parts(14_417_903, 11003) + // Standard Error: 539 + .saturating_add(Weight::from_parts(38_371, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -330,10 +338,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `638 + s * (35 ±0)` // Estimated: `11003` - // Minimum execution time: 34_286_000 picoseconds. 
- Weight::from_parts(37_391_401, 11003) - // Standard Error: 1_099 - .saturating_add(Weight::from_parts(61_165, 0).saturating_mul(s.into())) + // Minimum execution time: 33_327_000 picoseconds. + Weight::from_parts(36_208_941, 11003) + // Standard Error: 1_240 + .saturating_add(Weight::from_parts(105_805, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -341,16 +349,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) /// Storage: Identity SubsOf (r:1 w:1) /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:0) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `564 + s * (37 ±0)` + // Measured: `704 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 22_197_000 picoseconds. - Weight::from_parts(24_630_311, 6723) - // Standard Error: 1_092 - .saturating_add(Weight::from_parts(63_415, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 23_764_000 picoseconds. + Weight::from_parts(26_407_731, 6723) + // Standard Error: 1_025 + .saturating_add(Weight::from_parts(101_112, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } } @@ -364,10 +374,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `32 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 12_851_000 picoseconds. 
- Weight::from_parts(13_448_645, 2626) - // Standard Error: 1_636 - .saturating_add(Weight::from_parts(113_654, 0).saturating_mul(r.into())) + // Minimum execution time: 11_683_000 picoseconds. + Weight::from_parts(12_515_830, 2626) + // Standard Error: 2_154 + .saturating_add(Weight::from_parts(147_919, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -379,12 +389,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `442 + r * (5 ±0)` // Estimated: `11003` - // Minimum execution time: 33_342_000 picoseconds. - Weight::from_parts(33_155_116, 11003) - // Standard Error: 2_307 - .saturating_add(Weight::from_parts(56_409, 0).saturating_mul(r.into())) - // Standard Error: 450 - .saturating_add(Weight::from_parts(437_684, 0).saturating_mul(x.into())) + // Minimum execution time: 32_949_000 picoseconds. + Weight::from_parts(31_329_634, 11003) + // Standard Error: 4_496 + .saturating_add(Weight::from_parts(203_570, 0).saturating_mul(r.into())) + // Standard Error: 877 + .saturating_add(Weight::from_parts(429_346, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -399,10 +409,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `101` // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 10_315_000 picoseconds. - Weight::from_parts(26_535_526, 11003) - // Standard Error: 4_344 - .saturating_add(Weight::from_parts(3_016_873, 0).saturating_mul(s.into())) + // Minimum execution time: 9_157_000 picoseconds. 
+ Weight::from_parts(24_917_444, 11003) + // Standard Error: 4_554 + .saturating_add(Weight::from_parts(3_279_868, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -420,10 +430,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `194 + p * (32 ±0)` // Estimated: `11003` - // Minimum execution time: 10_220_000 picoseconds. - Weight::from_parts(25_050_056, 11003) - // Standard Error: 3_621 - .saturating_add(Weight::from_parts(1_291_143, 0).saturating_mul(p.into())) + // Minimum execution time: 9_240_000 picoseconds. + Weight::from_parts(23_326_035, 11003) + // Standard Error: 3_664 + .saturating_add(Weight::from_parts(1_439_873, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) @@ -437,16 +447,18 @@ impl WeightInfo for () { /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. /// The range of component `x` is `[0, 100]`. - fn clear_identity(_r: u32, s: u32, x: u32, ) -> Weight { + fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 56_018_000 picoseconds. - Weight::from_parts(37_757_186, 11003) - // Standard Error: 1_852 - .saturating_add(Weight::from_parts(1_257_980, 0).saturating_mul(s.into())) - // Standard Error: 1_852 - .saturating_add(Weight::from_parts(215_426, 0).saturating_mul(x.into())) + // Minimum execution time: 55_687_000 picoseconds. 
+ Weight::from_parts(30_695_182, 11003) + // Standard Error: 9_921 + .saturating_add(Weight::from_parts(162_357, 0).saturating_mul(r.into())) + // Standard Error: 1_937 + .saturating_add(Weight::from_parts(1_427_998, 0).saturating_mul(s.into())) + // Standard Error: 1_937 + .saturating_add(Weight::from_parts(247_578, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -461,12 +473,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `367 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 34_792_000 picoseconds. - Weight::from_parts(35_368_327, 11003) - // Standard Error: 3_476 - .saturating_add(Weight::from_parts(78_981, 0).saturating_mul(r.into())) - // Standard Error: 678 - .saturating_add(Weight::from_parts(459_077, 0).saturating_mul(x.into())) + // Minimum execution time: 34_876_000 picoseconds. + Weight::from_parts(32_207_018, 11003) + // Standard Error: 5_247 + .saturating_add(Weight::from_parts(249_156, 0).saturating_mul(r.into())) + // Standard Error: 1_023 + .saturating_add(Weight::from_parts(458_329, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -474,14 +486,16 @@ impl WeightInfo for () { /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[0, 100]`. - fn cancel_request(_r: u32, x: u32, ) -> Weight { + fn cancel_request(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `398 + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 31_306_000 picoseconds. 
- Weight::from_parts(33_304_799, 11003) - // Standard Error: 892 - .saturating_add(Weight::from_parts(451_655, 0).saturating_mul(x.into())) + // Minimum execution time: 30_689_000 picoseconds. + Weight::from_parts(31_967_170, 11003) + // Standard Error: 5_387 + .saturating_add(Weight::from_parts(42_676, 0).saturating_mul(r.into())) + // Standard Error: 1_051 + .saturating_add(Weight::from_parts(446_213, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -492,10 +506,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 8_215_000 picoseconds. - Weight::from_parts(8_692_102, 2626) - // Standard Error: 1_455 - .saturating_add(Weight::from_parts(110_912, 0).saturating_mul(r.into())) + // Minimum execution time: 7_357_000 picoseconds. + Weight::from_parts(7_932_950, 2626) + // Standard Error: 1_804 + .saturating_add(Weight::from_parts(132_653, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -506,10 +520,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 8_397_000 picoseconds. - Weight::from_parts(8_787_656, 2626) - // Standard Error: 1_440 - .saturating_add(Weight::from_parts(111_212, 0).saturating_mul(r.into())) + // Minimum execution time: 7_437_000 picoseconds. + Weight::from_parts(8_051_889, 2626) + // Standard Error: 1_997 + .saturating_add(Weight::from_parts(129_592, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -520,10 +534,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 8_466_000 picoseconds. 
- Weight::from_parts(8_689_763, 2626) - // Standard Error: 1_536 - .saturating_add(Weight::from_parts(106_371, 0).saturating_mul(r.into())) + // Minimum execution time: 7_385_000 picoseconds. + Weight::from_parts(7_911_589, 2626) + // Standard Error: 1_791 + .saturating_add(Weight::from_parts(125_788, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -537,12 +551,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `445 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 25_132_000 picoseconds. - Weight::from_parts(20_582_313, 11003) - // Standard Error: 10_427 - .saturating_add(Weight::from_parts(277_545, 0).saturating_mul(r.into())) - // Standard Error: 1_929 - .saturating_add(Weight::from_parts(747_966, 0).saturating_mul(x.into())) + // Minimum execution time: 24_073_000 picoseconds. + Weight::from_parts(17_817_684, 11003) + // Standard Error: 8_612 + .saturating_add(Weight::from_parts(406_251, 0).saturating_mul(r.into())) + // Standard Error: 1_593 + .saturating_add(Weight::from_parts(755_225, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -561,14 +575,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 72_043_000 picoseconds. - Weight::from_parts(50_994_735, 11003) - // Standard Error: 9_304 - .saturating_add(Weight::from_parts(123_052, 0).saturating_mul(r.into())) - // Standard Error: 1_817 - .saturating_add(Weight::from_parts(1_256_713, 0).saturating_mul(s.into())) - // Standard Error: 1_817 - .saturating_add(Weight::from_parts(219_242, 0).saturating_mul(x.into())) + // Minimum execution time: 73_981_000 picoseconds. 
+ Weight::from_parts(51_684_057, 11003) + // Standard Error: 12_662 + .saturating_add(Weight::from_parts(145_285, 0).saturating_mul(r.into())) + // Standard Error: 2_472 + .saturating_add(Weight::from_parts(1_421_039, 0).saturating_mul(s.into())) + // Standard Error: 2_472 + .saturating_add(Weight::from_parts(240_907, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -584,10 +598,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `475 + s * (36 ±0)` // Estimated: `11003` - // Minimum execution time: 30_747_000 picoseconds. - Weight::from_parts(35_975_985, 11003) - // Standard Error: 1_625 - .saturating_add(Weight::from_parts(73_263, 0).saturating_mul(s.into())) + // Minimum execution time: 29_367_000 picoseconds. + Weight::from_parts(34_214_998, 11003) + // Standard Error: 1_522 + .saturating_add(Weight::from_parts(114_551, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -600,10 +614,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `591 + s * (3 ±0)` // Estimated: `11003` - // Minimum execution time: 13_586_000 picoseconds. - Weight::from_parts(15_909_245, 11003) - // Standard Error: 611 - .saturating_add(Weight::from_parts(16_949, 0).saturating_mul(s.into())) + // Minimum execution time: 12_384_000 picoseconds. + Weight::from_parts(14_417_903, 11003) + // Standard Error: 539 + .saturating_add(Weight::from_parts(38_371, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -618,10 +632,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `638 + s * (35 ±0)` // Estimated: `11003` - // Minimum execution time: 34_286_000 picoseconds. 
- Weight::from_parts(37_391_401, 11003) - // Standard Error: 1_099 - .saturating_add(Weight::from_parts(61_165, 0).saturating_mul(s.into())) + // Minimum execution time: 33_327_000 picoseconds. + Weight::from_parts(36_208_941, 11003) + // Standard Error: 1_240 + .saturating_add(Weight::from_parts(105_805, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -629,16 +643,18 @@ impl WeightInfo for () { /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) /// Storage: Identity SubsOf (r:1 w:1) /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:0) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `564 + s * (37 ±0)` + // Measured: `704 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 22_197_000 picoseconds. - Weight::from_parts(24_630_311, 6723) - // Standard Error: 1_092 - .saturating_add(Weight::from_parts(63_415, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 23_764_000 picoseconds. 
+ Weight::from_parts(26_407_731, 6723) + // Standard Error: 1_025 + .saturating_add(Weight::from_parts(101_112, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index 881139ad5aa84..076bf91962efd 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -13,19 +13,19 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-authorship = { version = "4.0.0-dev", default-features = false, path = "../authorship" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-application-crypto = { version = "23.0.0", 
default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core", features = ["serde"] } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime", features = ["serde"] } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking", features = ["serde"] } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-session = { version = "4.0.0-dev", path = "../session" } @@ -46,6 +46,19 @@ std = [ "sp-runtime/std", "sp-staking/std", "sp-std/std", + "pallet-session/std" +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-authorship/try-runtime", + "pallet-session/try-runtime", + "sp-runtime/try-runtime" ] -runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/im-online/src/benchmarking.rs b/frame/im-online/src/benchmarking.rs index f90dcd53b3ef9..d8170d4817e3e 100644 --- a/frame/im-online/src/benchmarking.rs +++ b/frame/im-online/src/benchmarking.rs @@ -24,7 +24,6 @@ use super::*; use frame_benchmarking::v1::benchmarks; use frame_support::{traits::UnfilteredDispatchable, WeakBoundedVec}; use frame_system::RawOrigin; -use sp_core::{offchain::OpaqueMultiaddr, OpaquePeerId}; use sp_runtime::{ traits::{ValidateUnsigned, Zero}, transaction_validity::TransactionSource, @@ -33,13 +32,14 @@ use sp_runtime::{ use crate::Pallet as ImOnline; const MAX_KEYS: u32 = 1000; -const MAX_EXTERNAL_ADDRESSES: u32 = 100; pub fn 
create_heartbeat( k: u32, - e: u32, ) -> Result< - (crate::Heartbeat, ::Signature), + ( + crate::Heartbeat>, + ::Signature, + ), &'static str, > { let mut keys = Vec::new(); @@ -50,13 +50,8 @@ pub fn create_heartbeat( .map_err(|()| "More than the maximum number of keys provided")?; Keys::::put(bounded_keys); - let network_state = OpaqueNetworkState { - peer_id: OpaquePeerId::default(), - external_addresses: vec![OpaqueMultiaddr::new(vec![0; 32]); e as usize], - }; let input_heartbeat = Heartbeat { - block_number: T::BlockNumber::zero(), - network_state, + block_number: frame_system::pallet_prelude::BlockNumberFor::::zero(), session_index: 0, authority_index: k - 1, validators_len: keys.len() as u32, @@ -73,15 +68,13 @@ benchmarks! { #[extra] heartbeat { let k in 1 .. MAX_KEYS; - let e in 1 .. MAX_EXTERNAL_ADDRESSES; - let (input_heartbeat, signature) = create_heartbeat::(k, e)?; + let (input_heartbeat, signature) = create_heartbeat::(k)?; }: _(RawOrigin::None, input_heartbeat, signature) #[extra] validate_unsigned { let k in 1 .. MAX_KEYS; - let e in 1 .. MAX_EXTERNAL_ADDRESSES; - let (input_heartbeat, signature) = create_heartbeat::(k, e)?; + let (input_heartbeat, signature) = create_heartbeat::(k)?; let call = Call::heartbeat { heartbeat: input_heartbeat, signature }; }: { ImOnline::::validate_unsigned(TransactionSource::InBlock, &call) @@ -90,8 +83,7 @@ benchmarks! { validate_unsigned_and_then_heartbeat { let k in 1 .. MAX_KEYS; - let e in 1 .. MAX_EXTERNAL_ADDRESSES; - let (input_heartbeat, signature) = create_heartbeat::(k, e)?; + let (input_heartbeat, signature) = create_heartbeat::(k)?; let call = Call::heartbeat { heartbeat: input_heartbeat, signature }; let call_enc = call.encode(); }: { diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index dd3809f8e9a70..1de89dd00c812 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -26,8 +26,7 @@ //! in the current era or session. //! //! 
The heartbeat is a signed transaction, which was signed using the session key -//! and includes the recent best block number of the local validators chain as well -//! as the [NetworkState](../../client/offchain/struct.NetworkState.html). +//! and includes the recent best block number of the local validators chain. //! It is submitted as an Unsigned Transaction via off-chain workers. //! //! - [`Config`] @@ -78,23 +77,27 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; +pub mod migration; mod mock; mod tests; pub mod weights; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ + pallet_prelude::*, traits::{ EstimateNextSessionRotation, Get, OneSessionHandler, ValidatorSet, - ValidatorSetWithIdentification, WrapperOpaque, + ValidatorSetWithIdentification, }, BoundedSlice, WeakBoundedVec, }; -use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; +use frame_system::{ + offchain::{SendTransactionTypes, SubmitTransaction}, + pallet_prelude::*, +}; pub use pallet::*; use scale_info::TypeInfo; use sp_application_crypto::RuntimeAppPublic; -use sp_core::offchain::OpaqueNetworkState; use sp_runtime::{ offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, traits::{AtLeast32BitUnsigned, Convert, Saturating, TrailingZeroInput}, @@ -190,7 +193,6 @@ enum OffchainErr { AlreadyOnline(u32), FailedSigning, FailedToAcquireLock, - NetworkState, SubmitTransaction, } @@ -206,7 +208,6 @@ impl sp_std::fmt::Debug for OffchainErr write!(fmt, "Failed to sign heartbeat"), OffchainErr::FailedToAcquireLock => write!(fmt, "Failed to acquire lock"), - OffchainErr::NetworkState => write!(fmt, "Failed to fetch network state"), OffchainErr::SubmitTransaction => write!(fmt, "Failed to submit transaction"), } } @@ -222,8 +223,6 @@ where { /// Block number at the time heartbeat is created.. 
pub block_number: BlockNumber, - /// A state of local network (peer id and external addresses) - pub network_state: OpaqueNetworkState, /// Index of the current session. pub session_index: SessionIndex, /// An index of the authority on the list of validators. @@ -232,64 +231,6 @@ where pub validators_len: u32, } -/// A type that is the same as [`OpaqueNetworkState`] but with [`Vec`] replaced with -/// [`WeakBoundedVec`] where Limit is the respective size limit -/// `PeerIdEncodingLimit` represents the size limit of the encoding of `PeerId` -/// `MultiAddrEncodingLimit` represents the size limit of the encoding of `MultiAddr` -/// `AddressesLimit` represents the size limit of the vector of peers connected -#[derive(Clone, Eq, PartialEq, Encode, Decode, MaxEncodedLen, TypeInfo)] -#[codec(mel_bound())] -#[scale_info(skip_type_params(PeerIdEncodingLimit, MultiAddrEncodingLimit, AddressesLimit))] -pub struct BoundedOpaqueNetworkState -where - PeerIdEncodingLimit: Get, - MultiAddrEncodingLimit: Get, - AddressesLimit: Get, -{ - /// PeerId of the local node in SCALE encoded. - pub peer_id: WeakBoundedVec, - /// List of addresses the node knows it can be reached as. - pub external_addresses: - WeakBoundedVec, AddressesLimit>, -} - -impl, MultiAddrEncodingLimit: Get, AddressesLimit: Get> - BoundedOpaqueNetworkState -{ - fn force_from(ons: &OpaqueNetworkState) -> Self { - let peer_id = WeakBoundedVec::<_, PeerIdEncodingLimit>::force_from( - ons.peer_id.0.clone(), - Some( - "Warning: The size of the encoding of PeerId \ - is bigger than expected. A runtime configuration \ - adjustment may be needed.", - ), - ); - - let external_addresses = WeakBoundedVec::<_, AddressesLimit>::force_from( - ons.external_addresses - .iter() - .map(|x| { - WeakBoundedVec::<_, MultiAddrEncodingLimit>::force_from( - x.0.clone(), - Some( - "Warning: The size of the encoding of MultiAddr \ - is bigger than expected. 
A runtime configuration \ - adjustment may be needed.", - ), - ) - }) - .collect(), - Some( - "Warning: The network has more peers than expected \ - A runtime configuration adjustment may be needed.", - ), - ); - - Self { peer_id, external_addresses } - } -} - /// A type for representing the validator id in a session. pub type ValidatorId = <::ValidatorSet as ValidatorSet< ::AccountId, @@ -304,15 +245,17 @@ pub type IdentificationTuple = ( >>::Identification, ); -type OffchainResult = Result::BlockNumber>>; +type OffchainResult = Result>>; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; + + /// The current storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] @@ -331,10 +274,6 @@ pub mod pallet { /// The maximum number of peers to be stored in `ReceivedHeartbeats` type MaxPeerInHeartbeats: Get; - /// The maximum size of the encoding of `PeerId` and `MultiAddr` that are coming - /// from the hearbeat - type MaxPeerDataEncodingSize: Get; - /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -348,7 +287,7 @@ pub mod pallet { /// rough time when we should start considering sending heartbeats, since the workers /// avoids sending them at the very beginning of the session, assuming there is a /// chance the authority will produce a block and they won't be necessary. - type NextSessionRotation: EstimateNextSessionRotation; + type NextSessionRotation: EstimateNextSessionRotation>; /// A type that gives us the ability to submit unresponsiveness offence reports. type ReportUnresponsiveness: ReportOffence< @@ -400,38 +339,25 @@ pub mod pallet { /// more accurate then the value we calculate for `HeartbeatAfter`. 
#[pallet::storage] #[pallet::getter(fn heartbeat_after)] - pub(crate) type HeartbeatAfter = StorageValue<_, T::BlockNumber, ValueQuery>; + pub(super) type HeartbeatAfter = StorageValue<_, BlockNumberFor, ValueQuery>; /// The current set of keys that may issue a heartbeat. #[pallet::storage] #[pallet::getter(fn keys)] - pub(crate) type Keys = + pub(super) type Keys = StorageValue<_, WeakBoundedVec, ValueQuery>; - /// For each session index, we keep a mapping of `SessionIndex` and `AuthIndex` to - /// `WrapperOpaque`. + /// For each session index, we keep a mapping of `SessionIndex` and `AuthIndex`. #[pallet::storage] #[pallet::getter(fn received_heartbeats)] - pub(crate) type ReceivedHeartbeats = StorageDoubleMap< - _, - Twox64Concat, - SessionIndex, - Twox64Concat, - AuthIndex, - WrapperOpaque< - BoundedOpaqueNetworkState< - T::MaxPeerDataEncodingSize, - T::MaxPeerDataEncodingSize, - T::MaxPeerInHeartbeats, - >, - >, - >; + pub(super) type ReceivedHeartbeats = + StorageDoubleMap<_, Twox64Concat, SessionIndex, Twox64Concat, AuthIndex, bool>; /// For each session index, we keep a mapping of `ValidatorId` to the /// number of blocks authored by the given authority. 
#[pallet::storage] #[pallet::getter(fn authored_blocks)] - pub(crate) type AuthoredBlocks = StorageDoubleMap< + pub(super) type AuthoredBlocks = StorageDoubleMap< _, Twox64Concat, SessionIndex, @@ -448,7 +374,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { Pallet::::initialize_keys(&self.keys); } @@ -457,20 +383,17 @@ pub mod pallet { #[pallet::call] impl Pallet { /// ## Complexity: - /// - `O(K + E)` where K is length of `Keys` (heartbeat.validators_len) and E is length of - /// `heartbeat.network_state.external_address` + /// - `O(K)` where K is length of `Keys` (heartbeat.validators_len) /// - `O(K)`: decoding of length `K` - /// - `O(E)`: decoding/encoding of length `E` // NOTE: the weight includes the cost of validate_unsigned as it is part of the cost to // import block with such an extrinsic. #[pallet::call_index(0)] #[pallet::weight(::WeightInfo::validate_unsigned_and_then_heartbeat( - heartbeat.validators_len as u32, - heartbeat.network_state.external_addresses.len() as u32, + heartbeat.validators_len, ))] pub fn heartbeat( origin: OriginFor, - heartbeat: Heartbeat, + heartbeat: Heartbeat>, // since signature verification is done in `validate_unsigned` // we can skip doing it here again. 
_signature: ::Signature, @@ -479,22 +402,13 @@ pub mod pallet { let current_session = T::ValidatorSet::session_index(); let exists = - ReceivedHeartbeats::::contains_key(¤t_session, &heartbeat.authority_index); + ReceivedHeartbeats::::contains_key(current_session, heartbeat.authority_index); let keys = Keys::::get(); let public = keys.get(heartbeat.authority_index as usize); if let (false, Some(public)) = (exists, public) { Self::deposit_event(Event::::HeartbeatReceived { authority_id: public.clone() }); - let network_state_bounded = BoundedOpaqueNetworkState::< - T::MaxPeerDataEncodingSize, - T::MaxPeerDataEncodingSize, - T::MaxPeerInHeartbeats, - >::force_from(&heartbeat.network_state); - ReceivedHeartbeats::::insert( - ¤t_session, - &heartbeat.authority_index, - WrapperOpaque::from(network_state_bounded), - ); + ReceivedHeartbeats::::insert(current_session, heartbeat.authority_index, true); Ok(()) } else if exists { @@ -591,7 +505,7 @@ pub mod pallet { /// Keep track of number of authored blocks per authority, uncles are counted as /// well since they're a valid proof of being online. impl - pallet_authorship::EventHandler, T::BlockNumber> for Pallet + pallet_authorship::EventHandler, BlockNumberFor> for Pallet { fn note_author(author: ValidatorId) { Self::note_authorship(author); @@ -618,26 +532,26 @@ impl Pallet { fn is_online_aux(authority_index: AuthIndex, authority: &ValidatorId) -> bool { let current_session = T::ValidatorSet::session_index(); - ReceivedHeartbeats::::contains_key(¤t_session, &authority_index) || - AuthoredBlocks::::get(¤t_session, authority) != 0 + ReceivedHeartbeats::::contains_key(current_session, authority_index) || + AuthoredBlocks::::get(current_session, authority) != 0 } /// Returns `true` if a heartbeat has been received for the authority at `authority_index` in /// the authorities series, during the current session. Otherwise `false`. 
pub fn received_heartbeat_in_current_session(authority_index: AuthIndex) -> bool { let current_session = T::ValidatorSet::session_index(); - ReceivedHeartbeats::::contains_key(¤t_session, &authority_index) + ReceivedHeartbeats::::contains_key(current_session, authority_index) } /// Note that the given authority has authored a block in the current session. fn note_authorship(author: ValidatorId) { let current_session = T::ValidatorSet::session_index(); - AuthoredBlocks::::mutate(¤t_session, author, |authored| *authored += 1); + AuthoredBlocks::::mutate(current_session, author, |authored| *authored += 1); } pub(crate) fn send_heartbeats( - block_number: T::BlockNumber, + block_number: BlockNumberFor, ) -> OffchainResult>> { const START_HEARTBEAT_RANDOM_PERIOD: Permill = Permill::from_percent(10); const START_HEARTBEAT_FINAL_PERIOD: Permill = Permill::from_percent(80); @@ -700,20 +614,13 @@ impl Pallet { authority_index: u32, key: T::AuthorityId, session_index: SessionIndex, - block_number: T::BlockNumber, + block_number: BlockNumberFor, validators_len: u32, ) -> OffchainResult { // A helper function to prepare heartbeat call. let prepare_heartbeat = || -> OffchainResult> { - let network_state = - sp_io::offchain::network_state().map_err(|_| OffchainErr::NetworkState)?; - let heartbeat = Heartbeat { - block_number, - network_state, - session_index, - authority_index, - validators_len, - }; + let heartbeat = + Heartbeat { block_number, session_index, authority_index, validators_len }; let signature = key.sign(&heartbeat.encode()).ok_or(OffchainErr::FailedSigning)?; @@ -749,7 +656,7 @@ impl Pallet { // // At index `idx`: // 1. A (ImOnline) public key to be used by a validator at index `idx` to send im-online - // heartbeats. + // heartbeats. 
let authorities = Keys::::get(); // local keystore @@ -770,7 +677,7 @@ impl Pallet { fn with_heartbeat_lock( authority_index: u32, session_index: SessionIndex, - now: T::BlockNumber, + now: BlockNumberFor, f: impl FnOnce() -> OffchainResult, ) -> OffchainResult { let key = { @@ -780,7 +687,7 @@ impl Pallet { }; let storage = StorageValueRef::persistent(&key); let res = storage.mutate( - |status: Result>, StorageRetrievalError>| { + |status: Result>>, StorageRetrievalError>| { // Check if there is already a lock for that particular block. // This means that the heartbeat has already been sent, and we are just waiting // for it to be included. However if it doesn't get included for INCLUDE_THRESHOLD @@ -887,9 +794,9 @@ impl OneSessionHandler for Pallet { // current session, they have already been processed and won't be needed // anymore. #[allow(deprecated)] - ReceivedHeartbeats::::remove_prefix(&T::ValidatorSet::session_index(), None); + ReceivedHeartbeats::::remove_prefix(T::ValidatorSet::session_index(), None); #[allow(deprecated)] - AuthoredBlocks::::remove_prefix(&T::ValidatorSet::session_index(), None); + AuthoredBlocks::::remove_prefix(T::ValidatorSet::session_index(), None); if offenders.is_empty() { Self::deposit_event(Event::::AllGood); diff --git a/frame/im-online/src/migration.rs b/frame/im-online/src/migration.rs new file mode 100644 index 0000000000000..84652965972e3 --- /dev/null +++ b/frame/im-online/src/migration.rs @@ -0,0 +1,161 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage migrations for the im-online pallet. + +use super::*; +use frame_support::{storage_alias, traits::OnRuntimeUpgrade}; + +#[cfg(feature = "try-runtime")] +use frame_support::ensure; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + +/// The log target. +const TARGET: &str = "runtime::im-online::migration::v1"; + +/// The original data layout of the im-online pallet (`ReceivedHeartbeats` storage item). +mod v0 { + use super::*; + use frame_support::traits::WrapperOpaque; + + #[derive(Encode, Decode, Default)] + pub(super) struct BoundedOpaqueNetworkState { + /// PeerId of the local node in SCALE encoded. + pub peer_id: Vec, + /// List of addresses the node knows it can be reached as. + pub external_addresses: Vec>, + } + + #[storage_alias] + pub(super) type ReceivedHeartbeats = StorageDoubleMap< + Pallet, + Twox64Concat, + SessionIndex, + Twox64Concat, + AuthIndex, + WrapperOpaque, + >; +} + +pub mod v1 { + use super::*; + + /// Simple migration that replaces `ReceivedHeartbeats` values with `true`. 
+ pub struct Migration(sp_std::marker::PhantomData); + + impl OnRuntimeUpgrade for Migration { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + let count = v0::ReceivedHeartbeats::::iter().count(); + log::info!(target: TARGET, "Migrating {} received heartbeats", count); + + Ok((count as u32).encode()) + } + + fn on_runtime_upgrade() -> Weight { + let mut weight = T::DbWeight::get().reads(1); + if StorageVersion::get::>() != 0 { + log::warn!( + target: TARGET, + "Skipping migration because current storage version is not 0" + ); + return weight + } + + let heartbeats = v0::ReceivedHeartbeats::::drain().collect::>(); + + weight.saturating_accrue(T::DbWeight::get().reads(heartbeats.len() as u64)); + weight.saturating_accrue(T::DbWeight::get().writes(heartbeats.len() as u64)); + + for (session_index, auth_index, _) in heartbeats { + log::trace!( + target: TARGET, + "Migrated received heartbeat for {:?}...", + (session_index, auth_index) + ); + crate::ReceivedHeartbeats::::insert(session_index, auth_index, true); + } + + StorageVersion::new(1).put::>(); + weight.saturating_add(T::DbWeight::get().writes(1)) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> DispatchResult { + let old_received_heartbeats: u32 = + Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); + let new_received_heartbeats = crate::ReceivedHeartbeats::::iter().count(); + + if new_received_heartbeats != old_received_heartbeats as usize { + log::error!( + target: TARGET, + "migrated {} received heartbeats, expected {}", + new_received_heartbeats, + old_received_heartbeats + ); + } + ensure!(StorageVersion::get::>() >= 1, "must upgrade"); + + Ok(()) + } + } +} + +#[cfg(all(feature = "try-runtime", test))] +mod test { + use super::*; + use crate::mock::{new_test_ext, Runtime as T}; + use frame_support::traits::WrapperOpaque; + + #[test] + fn migration_works() { + new_test_ext().execute_with(|| { + 
assert_eq!(StorageVersion::get::>(), 0); + + // Insert some received heartbeats into the v0 storage: + let current_session = ::ValidatorSet::session_index(); + v0::ReceivedHeartbeats::::insert( + ¤t_session, + 0, + WrapperOpaque(v0::BoundedOpaqueNetworkState::default()), + ); + v0::ReceivedHeartbeats::::insert( + ¤t_session, + 1, + WrapperOpaque(v0::BoundedOpaqueNetworkState::default()), + ); + + // Check that the v0 storage is populated + assert_eq!(v0::ReceivedHeartbeats::::iter().count(), 2); + assert_eq!(crate::ReceivedHeartbeats::::iter().count(), 0, "V1 storage corrupted"); + + // Perform the migration + let state = v1::Migration::::pre_upgrade().unwrap(); + let _w = v1::Migration::::on_runtime_upgrade(); + v1::Migration::::post_upgrade(state).unwrap(); + + // Check that the v1 storage is populated and v0 storage is empty + assert_eq!(v0::ReceivedHeartbeats::::iter().count(), 0); + assert_eq!(crate::ReceivedHeartbeats::::iter().count(), 2); + assert!(crate::ReceivedHeartbeats::::contains_key(¤t_session, 0)); + assert_eq!(Some(true), crate::ReceivedHeartbeats::::get(¤t_session, 1)); + + assert_eq!(StorageVersion::get::>(), 1); + }); + } +} diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index 64e77b24b9b09..85da061fe904a 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -27,9 +27,9 @@ use frame_support::{ use pallet_session::historical as pallet_session_historical; use sp_core::H256; use sp_runtime::{ - testing::{Header, TestXt, UintAuthorityId}, + testing::{TestXt, UintAuthorityId}, traits::{BlakeTwo256, ConvertInto, IdentityLookup}, - Permill, + BuildStorage, Permill, }; use sp_staking::{ offence::{OffenceError, ReportOffence}, @@ -39,16 +39,12 @@ use sp_staking::{ use crate as imonline; use crate::Config; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub struct Runtime where - Block = Block, - 
NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub struct Runtime { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Session: pallet_session::{Pallet, Call, Storage, Event, Config}, ImOnline: imonline::{Pallet, Call, Storage, Config, Event}, Historical: pallet_session_historical::{Pallet}, @@ -105,7 +101,7 @@ impl ReportOffence for OffenceHandler { } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut result: sp_io::TestExternalities = t.into(); // Set the default keys, otherwise session will discard the validator. result.execute_with(|| { @@ -123,14 +119,13 @@ impl frame_system::Config for Runtime { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -217,7 +212,6 @@ impl Config for Runtime { type WeightInfo = (); type MaxKeys = ConstU32<10_000>; type MaxPeerInHeartbeats = ConstU32<10_000>; - type MaxPeerDataEncodingSize = ConstU32<1_000>; } impl frame_system::offchain::SendTransactionTypes for Runtime diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index 80320959c53bd..79036760c2d42 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -22,12 +22,9 @@ use super::*; use crate::mock::*; use frame_support::{assert_noop, dispatch}; -use sp_core::{ - offchain::{ - testing::{TestOffchainExt, TestTransactionPoolExt}, - OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, - }, - OpaquePeerId, +use 
sp_core::offchain::{ + testing::{TestOffchainExt, TestTransactionPoolExt}, + OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, }; use sp_runtime::{ testing::UintAuthorityId, @@ -121,14 +118,8 @@ fn heartbeat( id: UintAuthorityId, validators: Vec, ) -> dispatch::DispatchResult { - use frame_support::unsigned::ValidateUnsigned; - let heartbeat = Heartbeat { block_number, - network_state: OpaqueNetworkState { - peer_id: OpaquePeerId(vec![1]), - external_addresses: vec![], - }, session_index, authority_index, validators_len: validators.len() as u32, @@ -212,8 +203,6 @@ fn late_heartbeat_and_invalid_keys_len_should_fail() { #[test] fn should_generate_heartbeats() { - use frame_support::traits::OffchainWorker; - let mut ext = new_test_ext(); let (offchain, _state) = TestOffchainExt::new(); let (pool, state) = TestTransactionPoolExt::new(); @@ -252,7 +241,6 @@ fn should_generate_heartbeats() { heartbeat, Heartbeat { block_number: block, - network_state: sp_io::offchain::network_state().unwrap(), session_index: 2, authority_index: 2, validators_len: 3, @@ -365,21 +353,13 @@ fn should_not_send_a_report_if_already_online() { assert_eq!( heartbeat, - Heartbeat { - block_number: 4, - network_state: sp_io::offchain::network_state().unwrap(), - session_index: 2, - authority_index: 0, - validators_len: 3, - } + Heartbeat { block_number: 4, session_index: 2, authority_index: 0, validators_len: 3 } ); }); } #[test] fn should_handle_missing_progress_estimates() { - use frame_support::traits::OffchainWorker; - let mut ext = new_test_ext(); let (offchain, _state) = TestOffchainExt::new(); let (pool, state) = TestTransactionPoolExt::new(); diff --git a/frame/im-online/src/weights.rs b/frame/im-online/src/weights.rs index 64c1eb5f3a9b0..c3db02af25782 100644 --- a/frame/im-online/src/weights.rs +++ b/frame/im-online/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_im_online //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_im_online +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,13 +45,14 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_im_online. pub trait WeightInfo { - fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight; + fn validate_unsigned_and_then_heartbeat(k: u32, ) -> Weight; } /// Weights for pallet_im_online using the Substrate node and recommended hardware. 
@@ -61,25 +65,21 @@ impl WeightInfo for SubstrateWeight { /// Storage: ImOnline Keys (r:1 w:0) /// Proof: ImOnline Keys (max_values: Some(1), max_size: Some(320002), added: 320497, mode: MaxEncodedLen) /// Storage: ImOnline ReceivedHeartbeats (r:1 w:1) - /// Proof: ImOnline ReceivedHeartbeats (max_values: None, max_size: Some(10021032), added: 10023507, mode: MaxEncodedLen) + /// Proof: ImOnline ReceivedHeartbeats (max_values: None, max_size: Some(25), added: 2500, mode: MaxEncodedLen) /// Storage: ImOnline AuthoredBlocks (r:1 w:0) /// Proof: ImOnline AuthoredBlocks (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) /// The range of component `k` is `[1, 1000]`. - /// The range of component `e` is `[1, 100]`. - fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { + fn validate_unsigned_and_then_heartbeat(k: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `295 + k * (32 ±0)` - // Estimated: `10024497 + e * (35 ±0) + k * (32 ±0)` - // Minimum execution time: 95_573_000 picoseconds. - Weight::from_parts(78_856_572, 10024497) - // Standard Error: 315 - .saturating_add(Weight::from_parts(22_926, 0).saturating_mul(k.into())) - // Standard Error: 3_181 - .saturating_add(Weight::from_parts(362_093, 0).saturating_mul(e.into())) + // Estimated: `321487 + k * (1761 ±0)` + // Minimum execution time: 80_568_000 picoseconds. 
+ Weight::from_parts(95_175_595, 321487) + // Standard Error: 627 + .saturating_add(Weight::from_parts(39_094, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 35).saturating_mul(e.into())) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(k.into())) + .saturating_add(Weight::from_parts(0, 1761).saturating_mul(k.into())) } } @@ -92,24 +92,20 @@ impl WeightInfo for () { /// Storage: ImOnline Keys (r:1 w:0) /// Proof: ImOnline Keys (max_values: Some(1), max_size: Some(320002), added: 320497, mode: MaxEncodedLen) /// Storage: ImOnline ReceivedHeartbeats (r:1 w:1) - /// Proof: ImOnline ReceivedHeartbeats (max_values: None, max_size: Some(10021032), added: 10023507, mode: MaxEncodedLen) + /// Proof: ImOnline ReceivedHeartbeats (max_values: None, max_size: Some(25), added: 2500, mode: MaxEncodedLen) /// Storage: ImOnline AuthoredBlocks (r:1 w:0) /// Proof: ImOnline AuthoredBlocks (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) /// The range of component `k` is `[1, 1000]`. - /// The range of component `e` is `[1, 100]`. - fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { + fn validate_unsigned_and_then_heartbeat(k: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `295 + k * (32 ±0)` - // Estimated: `10024497 + e * (35 ±0) + k * (32 ±0)` - // Minimum execution time: 95_573_000 picoseconds. - Weight::from_parts(78_856_572, 10024497) - // Standard Error: 315 - .saturating_add(Weight::from_parts(22_926, 0).saturating_mul(k.into())) - // Standard Error: 3_181 - .saturating_add(Weight::from_parts(362_093, 0).saturating_mul(e.into())) + // Estimated: `321487 + k * (1761 ±0)` + // Minimum execution time: 80_568_000 picoseconds. 
+ Weight::from_parts(95_175_595, 321487) + // Standard Error: 627 + .saturating_add(Weight::from_parts(39_094, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 35).saturating_mul(e.into())) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(k.into())) + .saturating_add(Weight::from_parts(0, 1761).saturating_mul(k.into())) } } diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index 6895f076da22d..52fe97cc7e071 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -13,16 +13,16 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-keyring = { version = "7.0.0", optional = true, path = "../../primitives/keyring" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-keyring = { version = "24.0.0", optional 
= true, path = "../../primitives/keyring" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } @@ -40,10 +40,18 @@ std = [ "sp-keyring", "sp-runtime/std", "sp-std/std", + "pallet-balances/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index 56e177da3148f..3c0b49304131f 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -75,7 +75,7 @@ pub mod pallet { } #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::call] impl Pallet { @@ -269,7 +269,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { for (a, b) in &self.indices { >::insert(a, (b, >::zero(), false)) diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index 8bd05d04ab4e1..d63081e0b73f8 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -22,18 +22,14 @@ use crate::{self as pallet_indices, Config}; use frame_support::traits::{ConstU32, ConstU64}; use sp_core::H256; -use sp_runtime::testing::Header; +use sp_runtime::BuildStorage; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum 
Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Indices: pallet_indices::{Pallet, Call, Storage, Config, Event}, } @@ -46,13 +42,12 @@ impl frame_system::Config for Test { type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = Indices; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -78,7 +73,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -91,7 +86,7 @@ impl Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], } diff --git a/frame/indices/src/weights.rs b/frame/indices/src/weights.rs index 21d01c14ef9a2..d081cc738b1db 100644 --- a/frame/indices/src/weights.rs +++ b/frame/indices/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_indices //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! 
HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_indices +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_indices. pub trait WeightInfo { @@ -64,8 +68,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3534` - // Minimum execution time: 27_250_000 picoseconds. - Weight::from_parts(27_829_000, 3534) + // Minimum execution time: 25_491_000 picoseconds. + Weight::from_parts(26_456_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -77,8 +81,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 37_880_000 picoseconds. - Weight::from_parts(38_329_000, 3593) + // Minimum execution time: 38_027_000 picoseconds. + Weight::from_parts(38_749_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -88,8 +92,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 27_455_000 picoseconds. - Weight::from_parts(27_788_000, 3534) + // Minimum execution time: 26_652_000 picoseconds. 
+ Weight::from_parts(27_273_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -101,8 +105,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 29_865_000 picoseconds. - Weight::from_parts(30_420_000, 3593) + // Minimum execution time: 29_464_000 picoseconds. + Weight::from_parts(30_959_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -112,8 +116,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 29_689_000 picoseconds. - Weight::from_parts(30_443_000, 3534) + // Minimum execution time: 29_015_000 picoseconds. + Weight::from_parts(29_714_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -127,8 +131,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3534` - // Minimum execution time: 27_250_000 picoseconds. - Weight::from_parts(27_829_000, 3534) + // Minimum execution time: 25_491_000 picoseconds. + Weight::from_parts(26_456_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -140,8 +144,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 37_880_000 picoseconds. - Weight::from_parts(38_329_000, 3593) + // Minimum execution time: 38_027_000 picoseconds. + Weight::from_parts(38_749_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -151,8 +155,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 27_455_000 picoseconds. 
- Weight::from_parts(27_788_000, 3534) + // Minimum execution time: 26_652_000 picoseconds. + Weight::from_parts(27_273_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -164,8 +168,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 29_865_000 picoseconds. - Weight::from_parts(30_420_000, 3593) + // Minimum execution time: 29_464_000 picoseconds. + Weight::from_parts(30_959_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -175,8 +179,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 29_689_000 picoseconds. - Weight::from_parts(30_443_000, 3534) + // Minimum execution time: 29_015_000 picoseconds. + Weight::from_parts(29_714_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/insecure-randomness-collective-flip/Cargo.toml b/frame/insecure-randomness-collective-flip/Cargo.toml index bcebff6a9663f..d078eafacffc8 100644 --- a/frame/insecure-randomness-collective-flip/Cargo.toml +++ b/frame/insecure-randomness-collective-flip/Cargo.toml @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } safe-mix = { version = "1.0", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-runtime = { version = "7.0.0", 
default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } [features] default = ["std"] @@ -35,5 +35,11 @@ std = [ "scale-info/std", "sp-runtime/std", "sp-std/std", + "sp-core/std", + "sp-io/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/insecure-randomness-collective-flip/src/lib.rs b/frame/insecure-randomness-collective-flip/src/lib.rs index ad39c4c4fd885..474087777c46e 100644 --- a/frame/insecure-randomness-collective-flip/src/lib.rs +++ b/frame/insecure-randomness-collective-flip/src/lib.rs @@ -74,11 +74,12 @@ use safe_mix::TripletMix; use codec::Encode; use frame_support::{pallet_prelude::Weight, traits::Randomness}; +use frame_system::pallet_prelude::BlockNumberFor; use sp_runtime::traits::{Hash, Saturating}; const RANDOM_MATERIAL_LEN: u32 = 81; -fn block_number_to_index(block_number: T::BlockNumber) -> usize { +fn block_number_to_index(block_number: BlockNumberFor) -> usize { // on_initialize is called on the first block after genesis let index = (block_number - 1u32.into()) % RANDOM_MATERIAL_LEN.into(); index.try_into().ok().expect("Something % 81 is always smaller than usize; qed") @@ -90,7 +91,6 @@ pub use pallet::*; pub mod pallet { use super::*; use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(_); @@ -100,7 +100,7 @@ 
pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { - fn on_initialize(block_number: T::BlockNumber) -> Weight { + fn on_initialize(block_number: BlockNumberFor) -> Weight { let parent_hash = >::parent_hash(); >::mutate(|ref mut values| { @@ -123,7 +123,7 @@ pub mod pallet { StorageValue<_, BoundedVec>, ValueQuery>; } -impl Randomness for Pallet { +impl Randomness> for Pallet { /// This randomness uses a low-influence function, drawing upon the block hashes from the /// previous 81 blocks. Its result for any given subject will be known far in advance by anyone /// observing the chain. Any block producer has significant influence over their block hashes @@ -134,7 +134,7 @@ impl Randomness for Pallet { /// WARNING: Hashing the result of this function will remove any low-influence properties it has /// and mean that all bits of the resulting value are entirely manipulatable by the author of /// the parent block, who can determine the value of `parent_hash`. - fn random(subject: &[u8]) -> (T::Hash, T::BlockNumber) { + fn random(subject: &[u8]) -> (T::Hash, BlockNumberFor) { let block_number = >::block_number(); let index = block_number_to_index::(block_number); @@ -164,8 +164,8 @@ mod tests { use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, Header as _, IdentityLookup}, + BuildStorage, }; use frame_support::{ @@ -174,16 +174,12 @@ mod tests { }; use frame_system::limits; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, CollectiveFlip: pallet_insecure_randomness_collective_flip::{Pallet, Storage}, } ); @@ -199,14 +195,13 @@ mod tests { type BlockLength = BlockLength; type DbWeight = 
(); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -223,7 +218,7 @@ mod tests { impl pallet_insecure_randomness_collective_flip::Config for Test {} fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); t.into() } diff --git a/frame/lottery/Cargo.toml b/frame/lottery/Cargo.toml index 4d8839612b2ef..a46bf7feff65d 100644 --- a/frame/lottery/Cargo.toml +++ b/frame/lottery/Cargo.toml @@ -12,21 +12,21 @@ description = "FRAME Participation Lottery Pallet" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] frame-support-test = { version = 
"3.0.0", path = "../support/test" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } [features] default = ["std"] @@ -38,10 +38,22 @@ std = [ "scale-info/std", "sp-runtime/std", "sp-std/std", + "frame-support-test/std", + "pallet-balances/std", + "sp-core/std", + "sp-io/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-support-test/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs index 178f221a8946f..a6a94b4ab19af 100644 --- a/frame/lottery/src/lib.rs +++ b/frame/lottery/src/lib.rs @@ -142,7 +142,7 @@ pub mod pallet { type Currency: ReservableCurrency; /// Something that provides randomness in the runtime. - type Randomness: Randomness; + type Randomness: Randomness>; /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -208,7 +208,7 @@ pub mod pallet { /// The configuration for the current lottery. #[pallet::storage] pub(crate) type Lottery = - StorageValue<_, LotteryConfig>>; + StorageValue<_, LotteryConfig, BalanceOf>>; /// Users who have purchased a ticket. 
(Lottery Index, Tickets Purchased) #[pallet::storage] @@ -239,7 +239,7 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { - fn on_initialize(n: T::BlockNumber) -> Weight { + fn on_initialize(n: BlockNumberFor) -> Weight { Lottery::::mutate(|mut lottery| -> Weight { if let Some(config) = &mut lottery { let payout_block = @@ -350,8 +350,8 @@ pub mod pallet { pub fn start_lottery( origin: OriginFor, price: BalanceOf, - length: T::BlockNumber, - delay: T::BlockNumber, + length: BlockNumberFor, + delay: BlockNumberFor, repeat: bool, ) -> DispatchResult { T::ManagerOrigin::ensure_origin(origin)?; diff --git a/frame/lottery/src/mock.rs b/frame/lottery/src/mock.rs index 7afd0e319db34..aefb6a1cce2bf 100644 --- a/frame/lottery/src/mock.rs +++ b/frame/lottery/src/mock.rs @@ -28,21 +28,16 @@ use frame_support_test::TestRandomness; use frame_system::EnsureRoot; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, - Perbill, + BuildStorage, Perbill, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Lottery: pallet_lottery::{Pallet, Call, Storage, Event}, } @@ -58,14 +53,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; - type BlockNumber = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version 
= (); @@ -91,7 +85,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -116,7 +110,7 @@ pub type SystemCall = frame_system::Call; pub type BalancesCall = pallet_balances::Call; pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)], } diff --git a/frame/lottery/src/weights.rs b/frame/lottery/src/weights.rs index c21b09e7d5b92..3b4e562375345 100644 --- a/frame/lottery/src/weights.rs +++ b/frame/lottery/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_lottery //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_lottery +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_lottery. 
pub trait WeightInfo { @@ -77,8 +81,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452` // Estimated: `3593` - // Minimum execution time: 61_502_000 picoseconds. - Weight::from_parts(62_578_000, 3593) + // Minimum execution time: 60_298_000 picoseconds. + Weight::from_parts(62_058_000, 3593) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -89,10 +93,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_282_000 picoseconds. - Weight::from_parts(9_271_031, 0) - // Standard Error: 3_756 - .saturating_add(Weight::from_parts(349_990, 0).saturating_mul(n.into())) + // Minimum execution time: 7_291_000 picoseconds. + Weight::from_parts(8_178_186, 0) + // Standard Error: 3_048 + .saturating_add(Weight::from_parts(330_871, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Lottery Lottery (r:1 w:1) @@ -105,8 +109,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `161` // Estimated: `3593` - // Minimum execution time: 38_975_000 picoseconds. - Weight::from_parts(39_552_000, 3593) + // Minimum execution time: 36_741_000 picoseconds. + Weight::from_parts(38_288_000, 3593) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -116,8 +120,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1514` - // Minimum execution time: 8_243_000 picoseconds. - Weight::from_parts(8_359_000, 1514) + // Minimum execution time: 7_270_000 picoseconds. 
+ Weight::from_parts(7_578_000, 1514) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -133,10 +137,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) fn on_initialize_end() -> Weight { // Proof Size summary in bytes: - // Measured: `524` + // Measured: `558` // Estimated: `6196` - // Minimum execution time: 76_062_000 picoseconds. - Weight::from_parts(76_547_000, 6196) + // Minimum execution time: 76_611_000 picoseconds. + Weight::from_parts(78_107_000, 6196) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -154,10 +158,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn on_initialize_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `524` + // Measured: `558` // Estimated: `6196` - // Minimum execution time: 78_089_000 picoseconds. - Weight::from_parts(78_632_000, 6196) + // Minimum execution time: 78_731_000 picoseconds. + Weight::from_parts(80_248_000, 6196) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -183,8 +187,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452` // Estimated: `3593` - // Minimum execution time: 61_502_000 picoseconds. - Weight::from_parts(62_578_000, 3593) + // Minimum execution time: 60_298_000 picoseconds. + Weight::from_parts(62_058_000, 3593) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -195,10 +199,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_282_000 picoseconds. 
- Weight::from_parts(9_271_031, 0) - // Standard Error: 3_756 - .saturating_add(Weight::from_parts(349_990, 0).saturating_mul(n.into())) + // Minimum execution time: 7_291_000 picoseconds. + Weight::from_parts(8_178_186, 0) + // Standard Error: 3_048 + .saturating_add(Weight::from_parts(330_871, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Lottery Lottery (r:1 w:1) @@ -211,8 +215,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `161` // Estimated: `3593` - // Minimum execution time: 38_975_000 picoseconds. - Weight::from_parts(39_552_000, 3593) + // Minimum execution time: 36_741_000 picoseconds. + Weight::from_parts(38_288_000, 3593) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -222,8 +226,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1514` - // Minimum execution time: 8_243_000 picoseconds. - Weight::from_parts(8_359_000, 1514) + // Minimum execution time: 7_270_000 picoseconds. + Weight::from_parts(7_578_000, 1514) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -239,10 +243,10 @@ impl WeightInfo for () { /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) fn on_initialize_end() -> Weight { // Proof Size summary in bytes: - // Measured: `524` + // Measured: `558` // Estimated: `6196` - // Minimum execution time: 76_062_000 picoseconds. - Weight::from_parts(76_547_000, 6196) + // Minimum execution time: 76_611_000 picoseconds. 
+ Weight::from_parts(78_107_000, 6196) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -260,10 +264,10 @@ impl WeightInfo for () { /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn on_initialize_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `524` + // Measured: `558` // Estimated: `6196` - // Minimum execution time: 78_089_000 picoseconds. - Weight::from_parts(78_632_000, 6196) + // Minimum execution time: 78_731_000 picoseconds. + Weight::from_parts(80_248_000, 6196) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index 330d9401df2c7..e41a1e0919627 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -13,16 +13,16 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = 
"5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core", features = ["serde"] } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime", features = ["serde"] } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [features] default = ["std"] @@ -44,4 +44,8 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 74891186a4e22..24b9fb5fe9069 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -108,11 +108,12 @@ pub mod pallet { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig, I: 'static = ()> { pub members: BoundedVec, + #[serde(skip)] pub phantom: PhantomData, } #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { + impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) { use sp_std::collections::btree_set::BTreeSet; let members_set: BTreeSet<_> = self.members.iter().collect(); @@ -524,26 +525,22 @@ mod tests { use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + BuildStorage, }; use frame_support::{ assert_noop, assert_ok, bounded_vec, ord_parameter_types, parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild, StorageVersion}, + traits::{ConstU32, ConstU64, StorageVersion}, }; use frame_system::EnsureSignedBy; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block 
= Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Membership: pallet_membership::{Pallet, Call, Storage, Config, Event}, } ); @@ -559,14 +556,13 @@ mod tests { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -629,7 +625,7 @@ mod tests { } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); // We use default for brevity, but you can configure as desired if needed. pallet_membership::GenesisConfig:: { members: bounded_vec![10, 20, 30], @@ -642,7 +638,7 @@ mod tests { #[cfg(feature = "runtime-benchmarks")] pub(crate) fn new_bench_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default().build_storage::().unwrap().into() + frame_system::GenesisConfig::::default().build_storage().unwrap().into() } #[cfg(feature = "runtime-benchmarks")] diff --git a/frame/membership/src/weights.rs b/frame/membership/src/weights.rs index 70c50d8695dfc..18ea7fcb315a3 100644 --- a/frame/membership/src/weights.rs +++ b/frame/membership/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_membership //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_membership +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_membership. pub trait WeightInfo { @@ -71,12 +75,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[1, 99]`. fn add_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `174 + m * (64 ±0)` + // Measured: `208 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 18_264_000 picoseconds. - Weight::from_parts(19_343_697, 4687) - // Standard Error: 699 - .saturating_add(Weight::from_parts(44_401, 0).saturating_mul(m.into())) + // Minimum execution time: 17_040_000 picoseconds. + Weight::from_parts(18_344_571, 4687) + // Standard Error: 847 + .saturating_add(Weight::from_parts(50_842, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -94,12 +98,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[2, 100]`. 
fn remove_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `278 + m * (64 ±0)` + // Measured: `312 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 21_092_000 picoseconds. - Weight::from_parts(22_140_391, 4687) - // Standard Error: 545 - .saturating_add(Weight::from_parts(40_638, 0).saturating_mul(m.into())) + // Minimum execution time: 20_088_000 picoseconds. + Weight::from_parts(21_271_384, 4687) + // Standard Error: 786 + .saturating_add(Weight::from_parts(44_806, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -117,12 +121,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[2, 100]`. fn swap_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `278 + m * (64 ±0)` + // Measured: `312 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 21_002_000 picoseconds. - Weight::from_parts(22_364_944, 4687) - // Standard Error: 752 - .saturating_add(Weight::from_parts(54_363, 0).saturating_mul(m.into())) + // Minimum execution time: 20_308_000 picoseconds. + Weight::from_parts(21_469_843, 4687) + // Standard Error: 782 + .saturating_add(Weight::from_parts(56_893, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -140,12 +144,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[1, 100]`. fn reset_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `278 + m * (64 ±0)` + // Measured: `312 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_443_000 picoseconds. 
- Weight::from_parts(22_188_301, 4687) - // Standard Error: 945 - .saturating_add(Weight::from_parts(162_799, 0).saturating_mul(m.into())) + // Minimum execution time: 19_464_000 picoseconds. + Weight::from_parts(21_223_702, 4687) + // Standard Error: 1_068 + .saturating_add(Weight::from_parts(165_438, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -163,12 +167,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[1, 100]`. fn change_key(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `278 + m * (64 ±0)` + // Measured: `312 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 21_527_000 picoseconds. - Weight::from_parts(23_146_706, 4687) - // Standard Error: 724 - .saturating_add(Weight::from_parts(55_027, 0).saturating_mul(m.into())) + // Minimum execution time: 20_965_000 picoseconds. + Weight::from_parts(22_551_007, 4687) + // Standard Error: 860 + .saturating_add(Weight::from_parts(52_397, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -184,10 +188,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `32 + m * (32 ±0)` // Estimated: `4687 + m * (32 ±0)` - // Minimum execution time: 8_054_000 picoseconds. - Weight::from_parts(8_558_341, 4687) - // Standard Error: 360 - .saturating_add(Weight::from_parts(16_362, 0).saturating_mul(m.into())) + // Minimum execution time: 7_481_000 picoseconds. 
+ Weight::from_parts(7_959_053, 4687) + // Standard Error: 364 + .saturating_add(Weight::from_parts(18_653, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) @@ -197,12 +201,14 @@ impl WeightInfo for SubstrateWeight { /// Storage: TechnicalCommittee Prime (r:0 w:1) /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `m` is `[1, 100]`. - fn clear_prime(_m: u32, ) -> Weight { + fn clear_prime(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_784_000 picoseconds. - Weight::from_parts(4_100_031, 0) + // Minimum execution time: 3_373_000 picoseconds. + Weight::from_parts(3_750_452, 0) + // Standard Error: 142 + .saturating_add(Weight::from_parts(505, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().writes(2_u64)) } } @@ -220,12 +226,12 @@ impl WeightInfo for () { /// The range of component `m` is `[1, 99]`. fn add_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `174 + m * (64 ±0)` + // Measured: `208 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 18_264_000 picoseconds. - Weight::from_parts(19_343_697, 4687) - // Standard Error: 699 - .saturating_add(Weight::from_parts(44_401, 0).saturating_mul(m.into())) + // Minimum execution time: 17_040_000 picoseconds. + Weight::from_parts(18_344_571, 4687) + // Standard Error: 847 + .saturating_add(Weight::from_parts(50_842, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -243,12 +249,12 @@ impl WeightInfo for () { /// The range of component `m` is `[2, 100]`. 
fn remove_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `278 + m * (64 ±0)` + // Measured: `312 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 21_092_000 picoseconds. - Weight::from_parts(22_140_391, 4687) - // Standard Error: 545 - .saturating_add(Weight::from_parts(40_638, 0).saturating_mul(m.into())) + // Minimum execution time: 20_088_000 picoseconds. + Weight::from_parts(21_271_384, 4687) + // Standard Error: 786 + .saturating_add(Weight::from_parts(44_806, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -266,12 +272,12 @@ impl WeightInfo for () { /// The range of component `m` is `[2, 100]`. fn swap_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `278 + m * (64 ±0)` + // Measured: `312 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 21_002_000 picoseconds. - Weight::from_parts(22_364_944, 4687) - // Standard Error: 752 - .saturating_add(Weight::from_parts(54_363, 0).saturating_mul(m.into())) + // Minimum execution time: 20_308_000 picoseconds. + Weight::from_parts(21_469_843, 4687) + // Standard Error: 782 + .saturating_add(Weight::from_parts(56_893, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -289,12 +295,12 @@ impl WeightInfo for () { /// The range of component `m` is `[1, 100]`. fn reset_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `278 + m * (64 ±0)` + // Measured: `312 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_443_000 picoseconds. 
- Weight::from_parts(22_188_301, 4687) - // Standard Error: 945 - .saturating_add(Weight::from_parts(162_799, 0).saturating_mul(m.into())) + // Minimum execution time: 19_464_000 picoseconds. + Weight::from_parts(21_223_702, 4687) + // Standard Error: 1_068 + .saturating_add(Weight::from_parts(165_438, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -312,12 +318,12 @@ impl WeightInfo for () { /// The range of component `m` is `[1, 100]`. fn change_key(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `278 + m * (64 ±0)` + // Measured: `312 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 21_527_000 picoseconds. - Weight::from_parts(23_146_706, 4687) - // Standard Error: 724 - .saturating_add(Weight::from_parts(55_027, 0).saturating_mul(m.into())) + // Minimum execution time: 20_965_000 picoseconds. + Weight::from_parts(22_551_007, 4687) + // Standard Error: 860 + .saturating_add(Weight::from_parts(52_397, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -333,10 +339,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `32 + m * (32 ±0)` // Estimated: `4687 + m * (32 ±0)` - // Minimum execution time: 8_054_000 picoseconds. - Weight::from_parts(8_558_341, 4687) - // Standard Error: 360 - .saturating_add(Weight::from_parts(16_362, 0).saturating_mul(m.into())) + // Minimum execution time: 7_481_000 picoseconds. 
+ Weight::from_parts(7_959_053, 4687) + // Standard Error: 364 + .saturating_add(Weight::from_parts(18_653, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) @@ -346,12 +352,14 @@ impl WeightInfo for () { /// Storage: TechnicalCommittee Prime (r:0 w:1) /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `m` is `[1, 100]`. - fn clear_prime(_m: u32, ) -> Weight { + fn clear_prime(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_784_000 picoseconds. - Weight::from_parts(4_100_031, 0) + // Minimum execution time: 3_373_000 picoseconds. + Weight::from_parts(3_750_452, 0) + // Standard Error: 142 + .saturating_add(Weight::from_parts(505, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml index 5f6094af1648e..1d7739eb93c2c 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -12,19 +12,19 @@ description = "FRAME Merkle Mountain Range pallet." 
targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } sp-mmr-primitives = { version = "4.0.0-dev", default-features = false, path = "../../primitives/merkle-mountain-range" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -array-bytes = "4.1" +array-bytes = "6.1" env_logger = "0.9" itertools = "0.10.3" @@ -42,5 +42,14 @@ std = [ "sp-runtime/std", "sp-std/std", ] -runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] -try-runtime = ["frame-support/try-runtime"] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + 
"sp-runtime/try-runtime" +] diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 4ef833e6c5fca..2edef9a35d57a 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -57,6 +57,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::{log, weights::Weight}; +use frame_system::pallet_prelude::{BlockNumberFor, HeaderFor}; use sp_mmr_primitives::utils; use sp_runtime::{ traits::{self, One, Saturating}, @@ -91,7 +92,7 @@ pub struct ParentNumberAndHash { } impl LeafDataProvider for ParentNumberAndHash { - type LeafData = (::BlockNumber, ::Hash); + type LeafData = (BlockNumberFor, ::Hash); fn leaf_data() -> Self::LeafData { ( @@ -113,12 +114,13 @@ type LeafOf = <>::LeafData as primitives::LeafDataProvider> /// Hashing used for the pallet. pub(crate) type HashingOf = >::Hashing; +/// Hash type used for the pallet. +pub(crate) type HashOf = <>::Hashing as traits::Hash>::Output; #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); @@ -146,25 +148,7 @@ pub mod pallet { /// /// Then we create a tuple of these two hashes, SCALE-encode it (concatenate) and /// hash, to obtain a new MMR inner node - the new peak. - type Hashing: traits::Hash>::Hash>; - - /// The hashing output type. - /// - /// This type is actually going to be stored in the MMR. - /// Required to be provided again, to satisfy trait bounds for storage items. - type Hash: traits::Member - + traits::MaybeSerializeDeserialize - + sp_std::fmt::Debug - + sp_std::hash::Hash - + AsRef<[u8]> - + AsMut<[u8]> - + Copy - + Default - + codec::Codec - + codec::EncodeLike - + scale_info::TypeInfo - + MaxEncodedLen - + PartialOrd; + type Hashing: traits::Hash; /// Data stored in the leaf nodes. /// @@ -190,7 +174,7 @@ pub mod pallet { /// apart from having it in the storage. 
For instance you might output it in the header /// digest (see [`frame_system::Pallet::deposit_log`]) to make it available for Light /// Clients. Hook complexity should be `O(1)`. - type OnNewRoot: primitives::OnNewRoot<>::Hash>; + type OnNewRoot: primitives::OnNewRoot>; /// Weights for this pallet. type WeightInfo: WeightInfo; @@ -199,8 +183,7 @@ pub mod pallet { /// Latest MMR Root hash. #[pallet::storage] #[pallet::getter(fn mmr_root_hash)] - pub type RootHash, I: 'static = ()> = - StorageValue<_, >::Hash, ValueQuery>; + pub type RootHash, I: 'static = ()> = StorageValue<_, HashOf, ValueQuery>; /// Current size of the MMR (number of leaves). #[pallet::storage] @@ -214,11 +197,11 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn mmr_peak)] pub type Nodes, I: 'static = ()> = - StorageMap<_, Identity, NodeIndex, >::Hash, OptionQuery>; + StorageMap<_, Identity, NodeIndex, HashOf, OptionQuery>; #[pallet::hooks] impl, I: 'static> Hooks> for Pallet { - fn on_initialize(_n: T::BlockNumber) -> Weight { + fn on_initialize(_n: BlockNumberFor) -> Weight { use primitives::LeafDataProvider; let leaves = Self::mmr_leaves(); let peaks_before = sp_mmr_primitives::utils::NodesUtils::new(leaves).number_of_peaks(); @@ -283,11 +266,7 @@ impl, I: 'static> Pallet { pos: NodeIndex, parent_hash: ::Hash, ) -> sp_std::prelude::Vec { - NodesUtils::node_temp_offchain_key::<::Header>( - &T::INDEXING_PREFIX, - pos, - parent_hash, - ) + NodesUtils::node_temp_offchain_key::>(&T::INDEXING_PREFIX, pos, parent_hash) } /// Build canonical offchain key for node `pos` in MMR. 
@@ -303,7 +282,7 @@ impl, I: 'static> Pallet { fn leaf_index_to_parent_block_num( leaf_index: LeafIndex, leaves_count: LeafIndex, - ) -> ::BlockNumber { + ) -> BlockNumberFor { // leaves are zero-indexed and were added one per block since pallet activation, // while block numbers are one-indexed, so block number that added `leaf_idx` is: // `block_num = block_num_when_pallet_activated + leaf_idx + 1` @@ -315,16 +294,16 @@ impl, I: 'static> Pallet { } /// Convert a block number into a leaf index. - fn block_num_to_leaf_index(block_num: T::BlockNumber) -> Result + fn block_num_to_leaf_index(block_num: BlockNumberFor) -> Result where T: frame_system::Config, { - let first_mmr_block = utils::first_mmr_block_num::( + let first_mmr_block = utils::first_mmr_block_num::>( >::block_number(), Self::mmr_leaves(), )?; - utils::block_num_to_leaf_index::(block_num, first_mmr_block) + utils::block_num_to_leaf_index::>(block_num, first_mmr_block) } /// Generate an MMR proof for the given `block_numbers`. @@ -337,9 +316,9 @@ impl, I: 'static> Pallet { /// all the leaves to be present. /// It may return an error or panic if used incorrectly. pub fn generate_proof( - block_numbers: Vec, - best_known_block_number: Option, - ) -> Result<(Vec>, primitives::Proof<>::Hash>), primitives::Error> { + block_numbers: Vec>, + best_known_block_number: Option>, + ) -> Result<(Vec>, primitives::Proof>), primitives::Error> { // check whether best_known_block_number provided, else use current best block let best_known_block_number = best_known_block_number.unwrap_or_else(|| >::block_number()); @@ -360,7 +339,7 @@ impl, I: 'static> Pallet { } /// Return the on-chain MMR root hash. - pub fn mmr_root() -> >::Hash { + pub fn mmr_root() -> HashOf { Self::mmr_root_hash() } @@ -372,7 +351,7 @@ impl, I: 'static> Pallet { /// or the proof is invalid. 
pub fn verify_leaves( leaves: Vec>, - proof: primitives::Proof<>::Hash>, + proof: primitives::Proof>, ) -> Result<(), primitives::Error> { if proof.leaf_count > Self::mmr_leaves() || proof.leaf_count == 0 || diff --git a/frame/merkle-mountain-range/src/mmr/mmr.rs b/frame/merkle-mountain-range/src/mmr/mmr.rs index beb4ac977c3be..aeb3e7ea66414 100644 --- a/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -21,7 +21,7 @@ use crate::{ Hasher, Node, NodeOf, }, primitives::{self, Error, NodeIndex}, - Config, HashingOf, + Config, HashOf, HashingOf, }; use sp_mmr_primitives::{mmr_lib, utils::NodesUtils}; use sp_std::prelude::*; @@ -95,7 +95,7 @@ where pub fn verify_leaves_proof( &self, leaves: Vec, - proof: primitives::Proof<>::Hash>, + proof: primitives::Proof>, ) -> Result { let p = mmr_lib::MerkleProof::, Hasher, L>>::new( self.mmr.mmr_size(), @@ -145,7 +145,7 @@ where /// Commit the changes to underlying storage, return current number of leaves and /// calculate the new MMR's root hash. 
- pub fn finalize(self) -> Result<(NodeIndex, >::Hash), Error> { + pub fn finalize(self) -> Result<(NodeIndex, HashOf), Error> { let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; self.mmr.commit().map_err(|e| Error::Commit.log_error(e))?; Ok((self.leaves, root.hash())) @@ -166,7 +166,7 @@ where pub fn generate_proof( &self, leaf_indices: Vec, - ) -> Result<(Vec, primitives::Proof<>::Hash>), Error> { + ) -> Result<(Vec, primitives::Proof>), Error> { let positions = leaf_indices .iter() .map(|index| mmr_lib::leaf_index_to_pos(*index)) diff --git a/frame/merkle-mountain-range/src/mock.rs b/frame/merkle-mountain-range/src/mock.rs index 292c80a483325..ecc254278bf0f 100644 --- a/frame/merkle-mountain-range/src/mock.rs +++ b/frame/merkle-mountain-range/src/mock.rs @@ -25,21 +25,14 @@ use frame_support::{ }; use sp_core::H256; use sp_mmr_primitives::{Compact, LeafDataProvider}; -use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup, Keccak256}, -}; +use sp_runtime::traits::{BlakeTwo256, IdentityLookup, Keccak256}; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, MMR: pallet_mmr::{Pallet, Storage}, } ); @@ -48,13 +41,12 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = sp_core::sr25519::Public; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; 
type DbWeight = (); @@ -75,7 +67,6 @@ impl Config for Test { const INDEXING_PREFIX: &'static [u8] = b"mmr-"; type Hashing = Keccak256; - type Hash = H256; type LeafData = Compact, LeafData)>; type OnNewRoot = (); type WeightInfo = (); diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index b628b51d2938b..429df75182eee 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -23,9 +23,10 @@ use sp_core::{ H256, }; use sp_mmr_primitives::{mmr_lib::helper, utils, Compact, Proof}; +use sp_runtime::BuildStorage; pub(crate) fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default().build_storage::().unwrap().into() + frame_system::GenesisConfig::::default().build_storage().unwrap().into() } fn register_offchain_ext(ext: &mut sp_io::TestExternalities) { @@ -53,7 +54,7 @@ pub(crate) fn hex(s: &str) -> H256 { s.parse().unwrap() } -type BlockNumber = ::BlockNumber; +type BlockNumber = frame_system::pallet_prelude::BlockNumberFor; fn decode_node( v: Vec, diff --git a/frame/message-queue/Cargo.toml b/frame/message-queue/Cargo.toml index 404e678137211..929b713b7eb2e 100644 --- a/frame/message-queue/Cargo.toml +++ b/frame/message-queue/Cargo.toml @@ -9,24 +9,24 @@ repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet to queue and process messages" [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.137", optional = true, features = ["derive"] } +serde = { version = "1.0.163", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } -sp-core = { version = "7.0.0", default-features = false, path = 
"../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../primitives/arithmetic" } -sp-weights = { version = "4.0.0", default-features = false, path = "../../primitives/weights" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-weights = { version = "20.0.0", default-features = false, path = "../../primitives/weights" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } [dev-dependencies] -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } rand = "0.8.5" rand_distr = "0.4.3" @@ -44,10 +44,16 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "sp-tracing/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] 
diff --git a/frame/message-queue/src/benchmarking.rs b/frame/message-queue/src/benchmarking.rs index b53527048ac61..bbd321ceadd1a 100644 --- a/frame/message-queue/src/benchmarking.rs +++ b/frame/message-queue/src/benchmarking.rs @@ -142,7 +142,7 @@ mod benchmarks { // Check that it was processed. assert_last_event::( Event::Processed { - hash: T::Hashing::hash(&msg), + id: sp_io::hashing::blake2_256(&msg), origin: 0.into(), weight_used: 1.into_weight(), success: true, @@ -166,7 +166,7 @@ mod benchmarks { } assert_eq!(ServiceHead::::get().unwrap(), 10u32.into()); - assert_eq!(weight.consumed, T::WeightInfo::bump_service_head()); + assert_eq!(weight.consumed(), T::WeightInfo::bump_service_head()); } #[benchmark] @@ -227,7 +227,7 @@ mod benchmarks { assert_last_event::( Event::Processed { - hash: T::Hashing::hash(&((msgs - 1) as u32).encode()), + id: sp_io::hashing::blake2_256(&((msgs - 1) as u32).encode()), origin: 0.into(), weight_used: Weight::from_parts(1, 1), success: true, @@ -264,7 +264,7 @@ mod benchmarks { assert_last_event::( Event::Processed { - hash: T::Hashing::hash(&((msgs - 1) as u32).encode()), + id: sp_io::hashing::blake2_256(&((msgs - 1) as u32).encode()), origin: 0.into(), weight_used: Weight::from_parts(1, 1), success: true, diff --git a/frame/message-queue/src/integration_test.rs b/frame/message-queue/src/integration_test.rs index 255098b3b1415..a1003edf3c92f 100644 --- a/frame/message-queue/src/integration_test.rs +++ b/frame/message-queue/src/integration_test.rs @@ -22,8 +22,8 @@ use crate::{ mock::{ - new_test_ext, CountingMessageProcessor, IntoWeight, MockedWeightInfo, NumMessagesProcessed, - SuspendedQueues, + build_and_execute, CountingMessageProcessor, IntoWeight, MockedWeightInfo, + NumMessagesProcessed, YieldingQueues, }, mock_helpers::MessageOrigin, *, @@ -37,22 +37,15 @@ use frame_support::{ use rand::{rngs::StdRng, Rng, SeedableRng}; use rand_distr::Pareto; use sp_core::H256; -use sp_runtime::{ - testing::Header, - 
traits::{BlakeTwo256, IdentityLookup}, -}; +use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; use std::collections::{BTreeMap, BTreeSet}; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event}, } ); @@ -63,14 +56,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -96,6 +88,7 @@ impl Config for Test { type MessageProcessor = CountingMessageProcessor; type Size = u32; type QueueChangeHandler = (); + type QueuePausedQuery = (); type HeapSize = HeapSize; type MaxStale = MaxStale; type ServiceWeight = ServiceWeight; @@ -130,7 +123,7 @@ fn stress_test_enqueue_and_service() { let max_msg_len = MaxMessageLenOf::::get(); let mut rng = StdRng::seed_from_u64(42); - new_test_ext::().execute_with(|| { + build_and_execute::(|| { let mut msgs_remaining = 0; for _ in 0..blocks { // Start by enqueuing a large number of messages. 
@@ -178,7 +171,7 @@ fn stress_test_queue_suspension() { let max_msg_len = MaxMessageLenOf::::get(); let mut rng = StdRng::seed_from_u64(41); - new_test_ext::().execute_with(|| { + build_and_execute::(|| { let mut suspended = BTreeSet::::new(); let mut msgs_remaining = 0; @@ -207,7 +200,7 @@ fn stress_test_queue_suspension() { to_resume, per_queue.len() ); - SuspendedQueues::set(suspended.iter().map(|q| MessageOrigin::Everywhere(*q)).collect()); + YieldingQueues::set(suspended.iter().map(|q| MessageOrigin::Everywhere(*q)).collect()); // Pick a fraction of all messages currently in queue and process them. let resumed_messages = @@ -229,7 +222,7 @@ fn stress_test_queue_suspension() { process_all_messages(resumed_messages); msgs_remaining -= resumed_messages; - let resumed = SuspendedQueues::take(); + let resumed = YieldingQueues::take(); log::info!("Resumed all {} suspended queues", resumed.len()); log::info!("Processing all remaining {} messages", msgs_remaining); process_all_messages(msgs_remaining); diff --git a/frame/message-queue/src/lib.rs b/frame/message-queue/src/lib.rs index c8e1976103ebf..5acc3e9d5a138 100644 --- a/frame/message-queue/src/lib.rs +++ b/frame/message-queue/src/lib.rs @@ -195,7 +195,7 @@ use frame_support::{ pallet_prelude::*, traits::{ DefensiveTruncateFrom, EnqueueMessage, ExecuteOverweightError, Footprint, ProcessMessage, - ProcessMessageError, ServiceQueues, + ProcessMessageError, QueuePausedQuery, ServiceQueues, }, BoundedSlice, CloneNoBound, DefaultNoBound, }; @@ -204,7 +204,7 @@ pub use pallet::*; use scale_info::TypeInfo; use sp_arithmetic::traits::{BaseArithmetic, Unsigned}; use sp_runtime::{ - traits::{Hash, One, Zero}, + traits::{One, Zero}, SaturatedConversion, Saturating, }; use sp_std::{fmt::Debug, ops::Deref, prelude::*, vec}; @@ -473,6 +473,13 @@ pub mod pallet { /// removed. type QueueChangeHandler: OnQueueChanged<::Origin>; + /// Queried by the pallet to check whether a queue can be serviced. 
+ /// + /// This also applies to manual servicing via `execute_overweight` and `service_queues`. The + /// value of this is only polled once before servicing the queue. This means that changes to + /// it that happen *within* the servicing will not be reflected. + type QueuePausedQuery: QueuePausedQuery<::Origin>; + /// The size of the page; this implies the maximum message size which can be sent. /// /// A good value depends on the expected message sizes, their weights, the weight that is @@ -499,16 +506,13 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// Message discarded due to an inability to decode the item. Usually caused by state - /// corruption. - Discarded { hash: T::Hash }, /// Message discarded due to an error in the `MessageProcessor` (usually a format error). - ProcessingFailed { hash: T::Hash, origin: MessageOriginOf, error: ProcessMessageError }, + ProcessingFailed { id: [u8; 32], origin: MessageOriginOf, error: ProcessMessageError }, /// Message is processed. - Processed { hash: T::Hash, origin: MessageOriginOf, weight_used: Weight, success: bool }, + Processed { id: [u8; 32], origin: MessageOriginOf, weight_used: Weight, success: bool }, /// Message placed in overweight queue. OverweightEnqueued { - hash: T::Hash, + id: [u8; 32], origin: MessageOriginOf, page_index: PageIndex, message_index: T::Size, @@ -537,6 +541,10 @@ pub mod pallet { /// Such errors are expected, but not guaranteed, to resolve themselves eventually through /// retrying. TemporarilyUnprocessable, + /// The queue is paused and no message can be executed from it. + /// + /// This can change at any time and may resolve in the future by re-trying. + QueuePaused, } /// The index of the first and last (non-empty) pages. @@ -570,7 +578,12 @@ pub mod pallet { } } - /// Check all assumptions about [`crate::Config`]. 
+ #[cfg(feature = "try-runtime")] + fn try_state(_: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() + } + + /// Check all compile-time assumptions about [`crate::Config`]. fn integrity_test() { assert!(!MaxMessageLenOf::::get().is_zero(), "HeapSize too low"); } @@ -729,7 +742,7 @@ impl Pallet { /// /// Returns the current head if it got be bumped and `None` otherwise. fn bump_service_head(weight: &mut WeightMeter) -> Option> { - if !weight.check_accrue(T::WeightInfo::bump_service_head()) { + if weight.try_consume(T::WeightInfo::bump_service_head()).is_err() { return None } @@ -806,6 +819,8 @@ impl Pallet { weight_limit: Weight, ) -> Result> { let mut book_state = BookStateFor::::get(&origin); + ensure!(!T::QueuePausedQuery::is_paused(&origin), Error::::QueuePaused); + let mut page = Pages::::get(&origin, page_index).ok_or(Error::::NoPage)?; let (pos, is_processed, payload) = page.peek_index(index.into() as usize).ok_or(Error::::NoMessage)?; @@ -855,7 +870,7 @@ impl Pallet { book_state.message_count, book_state.size, ); - Ok(weight_counter.consumed.saturating_add(page_weight)) + Ok(weight_counter.consumed().saturating_add(page_weight)) }, } } @@ -938,14 +953,22 @@ impl Pallet { overweight_limit: Weight, ) -> (bool, Option>) { use PageExecutionStatus::*; - if !weight.check_accrue( - T::WeightInfo::service_queue_base().saturating_add(T::WeightInfo::ready_ring_unknit()), - ) { + if weight + .try_consume( + T::WeightInfo::service_queue_base() + .saturating_add(T::WeightInfo::ready_ring_unknit()), + ) + .is_err() + { return (false, None) } let mut book_state = BookStateFor::::get(&origin); let mut total_processed = 0; + if T::QueuePausedQuery::is_paused(&origin) { + let next_ready = book_state.ready_neighbours.as_ref().map(|x| x.next.clone()); + return (false, next_ready) + } while book_state.end > book_state.begin { let (processed, status) = @@ -989,10 +1012,13 @@ impl Pallet { overweight_limit: Weight, ) -> (u32, PageExecutionStatus) 
{ use PageExecutionStatus::*; - if !weight.check_accrue( - T::WeightInfo::service_page_base_completion() - .max(T::WeightInfo::service_page_base_no_completion()), - ) { + if weight + .try_consume( + T::WeightInfo::service_page_base_completion() + .max(T::WeightInfo::service_page_base_no_completion()), + ) + .is_err() + { return (0, Bailed) } @@ -1052,7 +1078,7 @@ impl Pallet { if page.is_complete() { return ItemExecutionStatus::NoItem } - if !weight.check_accrue(T::WeightInfo::service_page_item()) { + if weight.try_consume(T::WeightInfo::service_page_item()).is_err() { return ItemExecutionStatus::Bailed } @@ -1084,6 +1110,106 @@ impl Pallet { ItemExecutionStatus::Executed(is_processed) } + /// Ensure the correctness of state of this pallet. + /// + /// # Assumptions- + /// + /// If `serviceHead` points to a ready Queue, then BookState of that Queue has: + /// + /// * `message_count` > 0 + /// * `size` > 0 + /// * `end` > `begin` + /// * Some(ready_neighbours) + /// * If `ready_neighbours.next` == self.origin, then `ready_neighbours.prev` == self.origin + /// (only queue in ring) + /// + /// For Pages(begin to end-1) in BookState: + /// + /// * `remaining` > 0 + /// * `remaining_size` > 0 + /// * `first` <= `last` + /// * Every page can be decoded into peek_* functions + #[cfg(any(test, feature = "try-runtime"))] + pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + // Checking memory corruption for BookStateFor + ensure!( + BookStateFor::::iter_keys().count() == BookStateFor::::iter_values().count(), + "Memory Corruption in BookStateFor" + ); + // Checking memory corruption for Pages + ensure!( + Pages::::iter_keys().count() == Pages::::iter_values().count(), + "Memory Corruption in Pages" + ); + + // No state to check + if ServiceHead::::get().is_none() { + return Ok(()) + } + + //loop around this origin + let starting_origin = ServiceHead::::get().unwrap(); + + while let Some(head) = Self::bump_service_head(&mut WeightMeter::max_limit()) { + 
ensure!( + BookStateFor::::contains_key(&head), + "Service head must point to an existing book" + ); + + let head_book_state = BookStateFor::::get(&head); + ensure!( + head_book_state.message_count > 0, + "There must be some messages if in ReadyRing" + ); + ensure!(head_book_state.size > 0, "There must be some message size if in ReadyRing"); + ensure!( + head_book_state.end > head_book_state.begin, + "End > Begin if unprocessed messages exists" + ); + ensure!( + head_book_state.ready_neighbours.is_some(), + "There must be neighbours if in ReadyRing" + ); + + if head_book_state.ready_neighbours.as_ref().unwrap().next == head { + ensure!( + head_book_state.ready_neighbours.as_ref().unwrap().prev == head, + "Can only happen if only queue in ReadyRing" + ); + } + + for page_index in head_book_state.begin..head_book_state.end { + let page = Pages::::get(&head, page_index).unwrap(); + let remaining_messages = page.remaining; + let mut counted_remaining_messages = 0; + ensure!( + remaining_messages > 0.into(), + "These must be some messages that have not been processed yet!" + ); + + for i in 0..u32::MAX { + if let Some((_, processed, _)) = page.peek_index(i as usize) { + if !processed { + counted_remaining_messages += 1; + } + } else { + break + } + } + + ensure!( + remaining_messages == counted_remaining_messages.into(), + "Memory Corruption" + ); + } + + if head_book_state.ready_neighbours.as_ref().unwrap().next == starting_origin { + break + } + } + Ok(()) + } + /// Print the pages in each queue and the messages in each page. /// /// Processed messages are prefixed with a `*` and the current `begin`ning page with a `>`. 
@@ -1147,15 +1273,16 @@ impl Pallet { meter: &mut WeightMeter, overweight_limit: Weight, ) -> MessageExecutionStatus { - let hash = T::Hashing::hash(message); + let hash = sp_io::hashing::blake2_256(message); use ProcessMessageError::*; - let prev_consumed = meter.consumed; + let prev_consumed = meter.consumed(); + let mut id = hash; - match T::MessageProcessor::process_message(message, origin.clone(), meter) { + match T::MessageProcessor::process_message(message, origin.clone(), meter, &mut id) { Err(Overweight(w)) if w.any_gt(overweight_limit) => { // Permanently overweight. Self::deposit_event(Event::::OverweightEnqueued { - hash, + id, origin, page_index, message_index, @@ -1173,13 +1300,13 @@ impl Pallet { }, Err(error @ BadFormat | error @ Corrupt | error @ Unsupported) => { // Permanent error - drop - Self::deposit_event(Event::::ProcessingFailed { hash, origin, error }); + Self::deposit_event(Event::::ProcessingFailed { id, origin, error }); MessageExecutionStatus::Unprocessable { permanent: true } }, Ok(success) => { // Success - let weight_used = meter.consumed.saturating_sub(prev_consumed); - Self::deposit_event(Event::::Processed { hash, origin, weight_used, success }); + let weight_used = meter.consumed().saturating_sub(prev_consumed); + Self::deposit_event(Event::::Processed { id, origin, weight_used, success }); MessageExecutionStatus::Processed }, } @@ -1239,7 +1366,7 @@ impl ServiceQueues for Pallet { let mut next = match Self::bump_service_head(&mut weight) { Some(h) => h, - None => return weight.consumed, + None => return weight.consumed(), }; // The last queue that did not make any progress. // The loop aborts as soon as it arrives at this queue again without making any progress @@ -1265,7 +1392,7 @@ impl ServiceQueues for Pallet { None => break, } } - weight.consumed + weight.consumed() } /// Execute a single overweight message. 
@@ -1276,17 +1403,24 @@ impl ServiceQueues for Pallet { (message_origin, page, index): Self::OverweightMessageAddress, ) -> Result { let mut weight = WeightMeter::from_limit(weight_limit); - if !weight.check_accrue( - T::WeightInfo::execute_overweight_page_removed() - .max(T::WeightInfo::execute_overweight_page_updated()), - ) { + if weight + .try_consume( + T::WeightInfo::execute_overweight_page_removed() + .max(T::WeightInfo::execute_overweight_page_updated()), + ) + .is_err() + { return Err(ExecuteOverweightError::InsufficientWeight) } Pallet::::do_execute_overweight(message_origin, page, index, weight.remaining()).map_err( |e| match e { Error::::InsufficientWeight => ExecuteOverweightError::InsufficientWeight, - _ => ExecuteOverweightError::NotFound, + Error::::AlreadyProcessed => ExecuteOverweightError::AlreadyProcessed, + Error::::QueuePaused => ExecuteOverweightError::QueuePaused, + Error::::NoPage | Error::::NoMessage | Error::::Queued => + ExecuteOverweightError::NotFound, + _ => ExecuteOverweightError::Other, }, ) } diff --git a/frame/message-queue/src/mock.rs b/frame/message-queue/src/mock.rs index a0fe0105671e0..473c5faac4c5d 100644 --- a/frame/message-queue/src/mock.rs +++ b/frame/message-queue/src/mock.rs @@ -29,21 +29,17 @@ use frame_support::{ }; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; use sp_std::collections::btree_map::BTreeMap; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event}, } ); @@ -53,14 +49,13 @@ impl frame_system::Config for Test { type BlockLength = 
(); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -84,6 +79,7 @@ impl Config for Test { type MessageProcessor = RecordingMessageProcessor; type Size = u32; type QueueChangeHandler = RecordingQueueChangeHandler; + type QueuePausedQuery = MockedQueuePauser; type HeapSize = HeapSize; type MaxStale = MaxStale; type ServiceWeight = ServiceWeight; @@ -154,7 +150,8 @@ impl crate::weights::WeightInfo for MockedWeightInfo { parameter_types! { pub static MessagesProcessed: Vec<(Vec, MessageOrigin)> = vec![]; - pub static SuspendedQueues: Vec = vec![]; + /// Queues that should return `Yield` upon being processed. + pub static YieldingQueues: Vec = vec![]; } /// A message processor which records all processed messages into [`MessagesProcessed`]. @@ -172,6 +169,7 @@ impl ProcessMessage for RecordingMessageProcessor { message: &[u8], origin: Self::Origin, meter: &mut WeightMeter, + _id: &mut [u8; 32], ) -> Result { processing_message(message, &origin)?; @@ -190,7 +188,7 @@ impl ProcessMessage for RecordingMessageProcessor { }; let required = Weight::from_parts(weight, weight); - if meter.check_accrue(required) { + if meter.try_consume(required).is_ok() { let mut m = MessagesProcessed::get(); m.push((message.to_vec(), origin)); MessagesProcessed::set(m); @@ -204,7 +202,7 @@ impl ProcessMessage for RecordingMessageProcessor { /// Processed a mocked message. Messages that end with `badformat`, `corrupt`, `unsupported` or /// `yield` will fail with an error respectively. 
fn processing_message(msg: &[u8], origin: &MessageOrigin) -> Result<(), ProcessMessageError> { - if SuspendedQueues::get().contains(&origin) { + if YieldingQueues::get().contains(&origin) { return Err(ProcessMessageError::Yield) } @@ -239,6 +237,7 @@ impl ProcessMessage for CountingMessageProcessor { message: &[u8], origin: Self::Origin, meter: &mut WeightMeter, + _id: &mut [u8; 32], ) -> Result { if let Err(e) = processing_message(message, &origin) { NumMessagesErrored::set(NumMessagesErrored::get() + 1); @@ -246,7 +245,7 @@ impl ProcessMessage for CountingMessageProcessor { } let required = Weight::from_parts(1, 1); - if meter.check_accrue(required) { + if meter.try_consume(required).is_ok() { NumMessagesProcessed::set(NumMessagesProcessed::get() + 1); Ok(true) } else { @@ -268,23 +267,45 @@ impl OnQueueChanged for RecordingQueueChangeHandler { } } +parameter_types! { + pub static PausedQueues: Vec = vec![]; +} + +pub struct MockedQueuePauser; +impl QueuePausedQuery for MockedQueuePauser { + fn is_paused(id: &MessageOrigin) -> bool { + PausedQueues::get().contains(id) + } +} + /// Create new test externalities. /// /// Is generic since it is used by the unit test, integration tests and benchmarks. pub fn new_test_ext() -> sp_io::TestExternalities where - ::BlockNumber: From, + frame_system::pallet_prelude::BlockNumberFor: From, { sp_tracing::try_init_simple(); WeightForCall::take(); QueueChanges::take(); NumMessagesErrored::take(); - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| frame_system::Pallet::::set_block_number(1.into())); ext } +/// Run the function pointer inside externalities and asserts the try_state hook at the end. 
+pub fn build_and_execute(test: impl FnOnce() -> ()) +where + BlockNumberFor: From, +{ + new_test_ext::().execute_with(|| { + test(); + MessageQueue::do_try_state().expect("All invariants must hold after a test"); + }); +} + /// Set the weight of a specific weight function. pub fn set_weight(name: &str, w: Weight) { MockedWeightInfo::set_weight::(name, w); diff --git a/frame/message-queue/src/mock_helpers.rs b/frame/message-queue/src/mock_helpers.rs index 257691cae4171..f6109c127be12 100644 --- a/frame/message-queue/src/mock_helpers.rs +++ b/frame/message-queue/src/mock_helpers.rs @@ -62,10 +62,11 @@ where _message: &[u8], _origin: Self::Origin, meter: &mut WeightMeter, + _id: &mut [u8; 32], ) -> Result { let required = Weight::from_parts(REQUIRED_WEIGHT, REQUIRED_WEIGHT); - if meter.check_accrue(required) { + if meter.try_consume(required).is_ok() { Ok(true) } else { Err(ProcessMessageError::Overweight(required)) @@ -88,11 +89,11 @@ pub fn page(msg: &[u8]) -> PageOf { } pub fn single_page_book() -> BookStateOf { - BookState { begin: 0, end: 1, count: 1, ready_neighbours: None, message_count: 0, size: 0 } + BookState { begin: 0, end: 1, count: 1, message_count: 1, size: 1, ..Default::default() } } pub fn empty_book() -> BookStateOf { - BookState { begin: 0, end: 1, count: 1, ready_neighbours: None, message_count: 0, size: 0 } + BookState { begin: 0, end: 1, count: 1, ..Default::default() } } /// Returns a full page of messages with their index as payload and the number of messages. 
@@ -117,9 +118,9 @@ pub fn book_for(page: &PageOf) -> BookStateOf { count: 1, begin: 0, end: 1, - ready_neighbours: None, message_count: page.remaining.into() as u64, size: page.remaining_size.into() as u64, + ..Default::default() } } @@ -138,10 +139,8 @@ pub fn setup_bump_service_head( current: <::MessageProcessor as ProcessMessage>::Origin, next: <::MessageProcessor as ProcessMessage>::Origin, ) { - let mut book = single_page_book::(); - book.ready_neighbours = Some(Neighbours::> { prev: next.clone(), next }); - ServiceHead::::put(¤t); - BookStateFor::::insert(¤t, &book); + crate::Pallet::::enqueue_message(msg("1"), current); + crate::Pallet::::enqueue_message(msg("1"), next); } /// Knit a queue into the ready-ring and write it back to storage. @@ -163,11 +162,8 @@ pub fn unknit(o: &<::MessageProcessor as ProcessMessage> pub fn build_ring( queues: &[<::MessageProcessor as ProcessMessage>::Origin], ) { - for queue in queues { - BookStateFor::::insert(queue, empty_book::()); - } - for queue in queues { - knit::(queue); + for queue in queues.iter() { + crate::Pallet::::enqueue_message(msg("1"), queue.clone()); } assert_ring::(queues); } diff --git a/frame/message-queue/src/tests.rs b/frame/message-queue/src/tests.rs index 15bb905738531..bcb099a6accd1 100644 --- a/frame/message-queue/src/tests.rs +++ b/frame/message-queue/src/tests.rs @@ -23,25 +23,26 @@ use crate::{mock::*, *}; use frame_support::{assert_noop, assert_ok, assert_storage_noop, StorageNoopGuard}; use rand::{rngs::StdRng, Rng, SeedableRng}; +use sp_core::blake2_256; #[test] fn mocked_weight_works() { - new_test_ext::().execute_with(|| { + build_and_execute::(|| { assert!(::WeightInfo::service_queue_base().is_zero()); }); - new_test_ext::().execute_with(|| { + build_and_execute::(|| { set_weight("service_queue_base", Weight::MAX); assert_eq!(::WeightInfo::service_queue_base(), Weight::MAX); }); // The externalities reset it. 
- new_test_ext::().execute_with(|| { + build_and_execute::(|| { assert!(::WeightInfo::service_queue_base().is_zero()); }); } #[test] fn enqueue_within_one_page_works() { - new_test_ext::().execute_with(|| { + build_and_execute::(|| { use MessageOrigin::*; MessageQueue::enqueue_message(msg("a"), Here); MessageQueue::enqueue_message(msg("b"), Here); @@ -76,7 +77,7 @@ fn enqueue_within_one_page_works() { #[test] fn queue_priority_retains() { - new_test_ext::().execute_with(|| { + build_and_execute::(|| { use MessageOrigin::*; assert_ring(&[]); MessageQueue::enqueue_message(msg("a"), Everywhere(1)); @@ -107,11 +108,13 @@ fn queue_priority_retains() { #[test] fn queue_priority_reset_once_serviced() { - new_test_ext::().execute_with(|| { + build_and_execute::(|| { use MessageOrigin::*; MessageQueue::enqueue_message(msg("a"), Everywhere(1)); MessageQueue::enqueue_message(msg("b"), Everywhere(2)); MessageQueue::enqueue_message(msg("c"), Everywhere(3)); + MessageQueue::do_try_state().unwrap(); + println!("{}", MessageQueue::debug_info()); // service head is 1, it will process a, leaving service head at 2. it also processes b and // empties queue 2, so service head will end at 3. assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight()); @@ -134,7 +137,7 @@ fn queue_priority_reset_once_serviced() { #[test] fn service_queues_basic_works() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { MessageQueue::enqueue_messages(vec![msg("a"), msg("ab"), msg("abc")].into_iter(), Here); MessageQueue::enqueue_messages(vec![msg("x"), msg("xy"), msg("xyz")].into_iter(), There); assert_eq!(QueueChanges::take(), vec![(Here, 3, 6), (There, 3, 6)]); @@ -145,13 +148,11 @@ fn service_queues_basic_works() { assert_eq!(QueueChanges::take(), vec![(Here, 2, 5)]); // Service one message from `There`. 
- ServiceHead::::set(There.into()); assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_eq!(MessagesProcessed::take(), vec![(vmsg("x"), There)]); assert_eq!(QueueChanges::take(), vec![(There, 2, 5)]); // Service the remaining from `Here`. - ServiceHead::::set(Here.into()); assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight()); assert_eq!(MessagesProcessed::take(), vec![(vmsg("ab"), Here), (vmsg("abc"), Here)]); assert_eq!(QueueChanges::take(), vec![(Here, 0, 0)]); @@ -160,13 +161,14 @@ fn service_queues_basic_works() { assert_eq!(MessageQueue::service_queues(Weight::MAX), 2.into_weight()); assert_eq!(MessagesProcessed::take(), vec![(vmsg("xy"), There), (vmsg("xyz"), There)]); assert_eq!(QueueChanges::take(), vec![(There, 0, 0)]); + MessageQueue::do_try_state().unwrap(); }); } #[test] fn service_queues_failing_messages_works() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { set_weight("service_page_item", 1.into_weight()); MessageQueue::enqueue_message(msg("badformat"), Here); MessageQueue::enqueue_message(msg("corrupt"), Here); @@ -178,7 +180,7 @@ fn service_queues_failing_messages_works() { assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_last_event::( Event::ProcessingFailed { - hash: ::Hashing::hash(b"badformat"), + id: blake2_256(b"badformat"), origin: MessageOrigin::Here, error: ProcessMessageError::BadFormat, } @@ -187,7 +189,7 @@ fn service_queues_failing_messages_works() { assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_last_event::( Event::ProcessingFailed { - hash: ::Hashing::hash(b"corrupt"), + id: blake2_256(b"corrupt"), origin: MessageOrigin::Here, error: ProcessMessageError::Corrupt, } @@ -196,7 +198,7 @@ fn service_queues_failing_messages_works() { assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_last_event::( Event::ProcessingFailed { - hash: 
::Hashing::hash(b"unsupported"), + id: blake2_256(b"unsupported"), origin: MessageOrigin::Here, error: ProcessMessageError::Unsupported, } @@ -212,7 +214,7 @@ fn service_queues_failing_messages_works() { #[test] fn service_queues_suspension_works() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { MessageQueue::enqueue_messages(vec![msg("a"), msg("b"), msg("c")].into_iter(), Here); MessageQueue::enqueue_messages(vec![msg("x"), msg("y"), msg("z")].into_iter(), There); MessageQueue::enqueue_messages( @@ -226,8 +228,8 @@ fn service_queues_suspension_works() { assert_eq!(MessagesProcessed::take(), vec![(vmsg("a"), Here)]); assert_eq!(QueueChanges::take(), vec![(Here, 2, 2)]); - // Pause queue `Here` and `Everywhere(0)`. - SuspendedQueues::set(vec![Here, Everywhere(0)]); + // Make queue `Here` and `Everywhere(0)` yield. + YieldingQueues::set(vec![Here, Everywhere(0)]); // Service one message from `There`. assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); @@ -244,13 +246,13 @@ fn service_queues_suspension_works() { assert_eq!(MessageQueue::service_queues(Weight::MAX), Weight::zero()); // ... until we resume `Here`: - SuspendedQueues::set(vec![Everywhere(0)]); + YieldingQueues::set(vec![Everywhere(0)]); assert_eq!(MessageQueue::service_queues(Weight::MAX), 2.into_weight()); assert_eq!(MessagesProcessed::take(), vec![(vmsg("b"), Here), (vmsg("c"), Here)]); // Everywhere still won't move. assert_eq!(MessageQueue::service_queues(Weight::MAX), Weight::zero()); - SuspendedQueues::take(); + YieldingQueues::take(); // Resume `Everywhere(0)` makes it work. assert_eq!(MessageQueue::service_queues(Weight::MAX), 3.into_weight()); assert_eq!( @@ -267,7 +269,7 @@ fn service_queues_suspension_works() { #[test] fn reap_page_permanent_overweight_works() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { // Create 10 pages more than the stale limit. 
let n = (MaxStale::get() + 10) as usize; for _ in 0..n { @@ -307,7 +309,7 @@ fn reaping_overweight_fails_properly() { use MessageOrigin::*; assert_eq!(MaxStale::get(), 2, "The stale limit is two"); - new_test_ext::().execute_with(|| { + build_and_execute::(|| { // page 0 MessageQueue::enqueue_message(msg("weight=4"), Here); MessageQueue::enqueue_message(msg("a"), Here); @@ -377,29 +379,29 @@ fn reaping_overweight_fails_properly() { #[test] fn service_queue_bails() { // Not enough weight for `service_queue_base`. - new_test_ext::().execute_with(|| { + build_and_execute::(|| { set_weight("service_queue_base", 2.into_weight()); let mut meter = WeightMeter::from_limit(1.into_weight()); assert_storage_noop!(MessageQueue::service_queue(0u32.into(), &mut meter, Weight::MAX)); - assert!(meter.consumed.is_zero()); + assert!(meter.consumed().is_zero()); }); // Not enough weight for `ready_ring_unknit`. - new_test_ext::().execute_with(|| { + build_and_execute::(|| { set_weight("ready_ring_unknit", 2.into_weight()); let mut meter = WeightMeter::from_limit(1.into_weight()); assert_storage_noop!(MessageQueue::service_queue(0u32.into(), &mut meter, Weight::MAX)); - assert!(meter.consumed.is_zero()); + assert!(meter.consumed().is_zero()); }); // Not enough weight for `service_queue_base` and `ready_ring_unknit`. - new_test_ext::().execute_with(|| { + build_and_execute::(|| { set_weight("service_queue_base", 2.into_weight()); set_weight("ready_ring_unknit", 2.into_weight()); let mut meter = WeightMeter::from_limit(3.into_weight()); assert_storage_noop!(MessageQueue::service_queue(0.into(), &mut meter, Weight::MAX)); - assert!(meter.consumed.is_zero()); + assert!(meter.consumed().is_zero()); }); } @@ -408,7 +410,7 @@ fn service_page_works() { use super::integration_test::Test; // Run with larger page size. 
use MessageOrigin::*; use PageExecutionStatus::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { set_weight("service_page_base_completion", 2.into_weight()); set_weight("service_page_item", 3.into_weight()); @@ -445,7 +447,7 @@ fn service_page_works() { #[test] fn service_page_bails() { // Not enough weight for `service_page_base_completion`. - new_test_ext::().execute_with(|| { + build_and_execute::(|| { set_weight("service_page_base_completion", 2.into_weight()); let mut meter = WeightMeter::from_limit(1.into_weight()); @@ -459,10 +461,10 @@ fn service_page_bails() { &mut meter, Weight::MAX )); - assert!(meter.consumed.is_zero()); + assert!(meter.consumed().is_zero()); }); // Not enough weight for `service_page_base_no_completion`. - new_test_ext::().execute_with(|| { + build_and_execute::(|| { set_weight("service_page_base_no_completion", 2.into_weight()); let mut meter = WeightMeter::from_limit(1.into_weight()); @@ -476,13 +478,13 @@ fn service_page_bails() { &mut meter, Weight::MAX )); - assert!(meter.consumed.is_zero()); + assert!(meter.consumed().is_zero()); }); } #[test] fn service_page_item_bails() { - new_test_ext::().execute_with(|| { + build_and_execute::(|| { let _guard = StorageNoopGuard::default(); let (mut page, _) = full_page::(); let mut weight = WeightMeter::from_limit(10.into_weight()); @@ -509,7 +511,7 @@ fn service_page_suspension_works() { use MessageOrigin::*; use PageExecutionStatus::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { let (page, mut msgs) = full_page::(); assert!(msgs >= 10, "pre-condition: need at least 10 msgs per page"); let mut book = book_for::(&page); @@ -526,7 +528,7 @@ fn service_page_suspension_works() { msgs -= 5; // Then we pause the queue. - SuspendedQueues::set(vec![Here]); + YieldingQueues::set(vec![Here]); // Noting happens... 
for _ in 0..5 { let (_, status) = crate::Pallet::::service_page( @@ -540,7 +542,7 @@ fn service_page_suspension_works() { } // Resume and process all remaining. - SuspendedQueues::take(); + YieldingQueues::take(); let (_, status) = crate::Pallet::::service_page( &Here, &mut book, @@ -557,14 +559,8 @@ fn service_page_suspension_works() { #[test] fn bump_service_head_works() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { - // Create a ready ring with three queues. - BookStateFor::::insert(Here, empty_book::()); - knit(&Here); - BookStateFor::::insert(There, empty_book::()); - knit(&There); - BookStateFor::::insert(Everywhere(0), empty_book::()); - knit(&Everywhere(0)); + build_and_execute::(|| { + build_triple_ring(); // Bump 99 times. for i in 0..99 { @@ -580,48 +576,41 @@ fn bump_service_head_works() { /// `bump_service_head` does nothing when called with an insufficient weight limit. #[test] fn bump_service_head_bails() { - new_test_ext::().execute_with(|| { + build_and_execute::(|| { set_weight("bump_service_head", 2.into_weight()); - setup_bump_service_head::(0.into(), 10.into()); + setup_bump_service_head::(0.into(), 1.into()); let _guard = StorageNoopGuard::default(); let mut meter = WeightMeter::from_limit(1.into_weight()); assert!(MessageQueue::bump_service_head(&mut meter).is_none()); - assert_eq!(meter.consumed, 0.into_weight()); + assert_eq!(meter.consumed(), 0.into_weight()); }); } #[test] fn bump_service_head_trivial_works() { - new_test_ext::().execute_with(|| { + build_and_execute::(|| { set_weight("bump_service_head", 2.into_weight()); let mut meter = WeightMeter::max_limit(); assert_eq!(MessageQueue::bump_service_head(&mut meter), None, "Cannot bump"); - assert_eq!(meter.consumed, 2.into_weight()); + assert_eq!(meter.consumed(), 2.into_weight()); setup_bump_service_head::(0.into(), 1.into()); assert_eq!(MessageQueue::bump_service_head(&mut meter), Some(0.into())); assert_eq!(ServiceHead::::get().unwrap(), 1.into(), "Bumped the head"); 
- assert_eq!(meter.consumed, 4.into_weight()); + assert_eq!(meter.consumed(), 4.into_weight()); - assert_eq!(MessageQueue::bump_service_head(&mut meter), None, "Cannot bump"); - assert_eq!(meter.consumed, 6.into_weight()); + assert_eq!(MessageQueue::bump_service_head(&mut meter), Some(1.into()), "Its a ring"); + assert_eq!(meter.consumed(), 6.into_weight()); }); } #[test] fn bump_service_head_no_head_noops() { - use MessageOrigin::*; - new_test_ext::().execute_with(|| { - // Create a ready ring with three queues. - BookStateFor::::insert(Here, empty_book::()); - knit(&Here); - BookStateFor::::insert(There, empty_book::()); - knit(&There); - BookStateFor::::insert(Everywhere(0), empty_book::()); - knit(&Everywhere(0)); + build_and_execute::(|| { + build_triple_ring(); // But remove the service head. ServiceHead::::kill(); @@ -633,7 +622,7 @@ fn bump_service_head_no_head_noops() { #[test] fn service_page_item_consumes_correct_weight() { - new_test_ext::().execute_with(|| { + build_and_execute::(|| { let mut page = page::(b"weight=3"); let mut weight = WeightMeter::from_limit(10.into_weight()); let overweight_limit = 0.into_weight(); @@ -650,14 +639,14 @@ fn service_page_item_consumes_correct_weight() { ), ItemExecutionStatus::Executed(true) ); - assert_eq!(weight.consumed, 5.into_weight()); + assert_eq!(weight.consumed(), 5.into_weight()); }); } /// `service_page_item` skips a permanently `Overweight` message and marks it as `unprocessed`. 
#[test] fn service_page_item_skips_perm_overweight_message() { - new_test_ext::().execute_with(|| { + build_and_execute::(|| { let mut page = page::(b"TooMuch"); let mut weight = WeightMeter::from_limit(2.into_weight()); let overweight_limit = 0.into_weight(); @@ -674,10 +663,10 @@ fn service_page_item_skips_perm_overweight_message() { ), ItemExecutionStatus::Executed(false) ); - assert_eq!(weight.consumed, 2.into_weight()); + assert_eq!(weight.consumed(), 2.into_weight()); assert_last_event::( Event::OverweightEnqueued { - hash: ::Hashing::hash(b"TooMuch"), + id: blake2_256(b"TooMuch"), origin: MessageOrigin::Here, message_index: 0, page_index: 0, @@ -696,7 +685,7 @@ fn service_page_item_skips_perm_overweight_message() { #[test] fn peek_index_works() { use super::integration_test::Test; // Run with larger page size. - new_test_ext::().execute_with(|| { + build_and_execute::(|| { // Fill a page with messages. let (mut page, msgs) = full_page::(); let msg_enc_len = ItemHeader::<::Size>::max_encoded_len() + 4; @@ -717,7 +706,7 @@ fn peek_index_works() { #[test] fn peek_first_and_skip_first_works() { use super::integration_test::Test; // Run with larger page size. - new_test_ext::().execute_with(|| { + build_and_execute::(|| { // Fill a page with messages. let (mut page, msgs) = full_page::(); @@ -740,7 +729,7 @@ fn peek_first_and_skip_first_works() { #[test] fn note_processed_at_pos_works() { use super::integration_test::Test; // Run with larger page size. - new_test_ext::().execute_with(|| { + build_and_execute::(|| { let (mut page, msgs) = full_page::(); for i in 0..msgs { @@ -776,7 +765,7 @@ fn note_processed_at_pos_idempotent() { #[test] fn is_complete_works() { use super::integration_test::Test; // Run with larger page size. 
- new_test_ext::().execute_with(|| { + build_and_execute::(|| { let (mut page, msgs) = full_page::(); assert!(msgs > 3, "Boring"); let msg_enc_len = ItemHeader::<::Size>::max_encoded_len() + 4; @@ -932,8 +921,9 @@ fn page_from_message_max_len_works() { #[test] fn sweep_queue_works() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { build_triple_ring(); + QueueChanges::take(); let book = BookStateFor::::get(Here); assert!(book.begin != book.end); @@ -968,9 +958,8 @@ fn sweep_queue_works() { #[test] fn sweep_queue_wraps_works() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { - BookStateFor::::insert(Here, empty_book::()); - knit(&Here); + build_and_execute::(|| { + build_ring::(&[Here]); MessageQueue::sweep_queue(Here); let book = BookStateFor::::get(Here); @@ -981,14 +970,14 @@ fn sweep_queue_wraps_works() { #[test] fn sweep_queue_invalid_noops() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { assert_storage_noop!(MessageQueue::sweep_queue(Here)); }); } #[test] fn footprint_works() { - new_test_ext::().execute_with(|| { + build_and_execute::(|| { let origin = MessageOrigin::Here; let (page, msgs) = full_page::(); let book = book_for::(&page); @@ -1006,7 +995,7 @@ fn footprint_works() { /// The footprint of an invalid queue is the default footprint. 
#[test] fn footprint_invalid_works() { - new_test_ext::().execute_with(|| { + build_and_execute::(|| { let origin = MessageOrigin::Here; assert_eq!(MessageQueue::footprint(origin), Default::default()); }) @@ -1016,7 +1005,7 @@ fn footprint_invalid_works() { #[test] fn footprint_on_swept_works() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { let mut book = empty_book::(); book.message_count = 3; book.size = 10; @@ -1032,7 +1021,7 @@ fn footprint_on_swept_works() { #[test] fn execute_overweight_works() { - new_test_ext::().execute_with(|| { + build_and_execute::(|| { set_weight("bump_service_head", 1.into_weight()); set_weight("service_queue_base", 1.into_weight()); set_weight("service_page_base_completion", 1.into_weight()); @@ -1050,7 +1039,7 @@ fn execute_overweight_works() { assert_eq!(QueueChanges::take(), vec![(origin, 1, 8)]); assert_last_event::( Event::OverweightEnqueued { - hash: ::Hashing::hash(b"weight=6"), + id: blake2_256(b"weight=6"), origin: MessageOrigin::Here, message_index: 0, page_index: 0, @@ -1092,7 +1081,7 @@ fn execute_overweight_works() { fn permanently_overweight_book_unknits() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { set_weight("bump_service_head", 1.into_weight()); set_weight("service_queue_base", 1.into_weight()); set_weight("service_page_base_completion", 1.into_weight()); @@ -1105,7 +1094,7 @@ fn permanently_overweight_book_unknits() { assert_eq!(MessageQueue::service_queues(8.into_weight()), 4.into_weight()); assert_last_event::( Event::OverweightEnqueued { - hash: ::Hashing::hash(b"weight=9"), + id: blake2_256(b"weight=9"), origin: Here, message_index: 0, page_index: 0, @@ -1129,7 +1118,7 @@ fn permanently_overweight_book_unknits() { fn permanently_overweight_book_unknits_multiple() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { set_weight("bump_service_head", 1.into_weight()); 
set_weight("service_queue_base", 1.into_weight()); set_weight("service_page_base_completion", 1.into_weight()); @@ -1168,7 +1157,7 @@ fn permanently_overweight_book_unknits_multiple() { fn ready_but_empty_does_not_panic() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { BookStateFor::::insert(Here, empty_book::()); BookStateFor::::insert(There, empty_book::()); @@ -1188,7 +1177,7 @@ fn ready_but_empty_does_not_panic() { fn ready_but_perm_overweight_does_not_panic() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { MessageQueue::enqueue_message(msg("weight=9"), Here); assert_eq!(MessageQueue::service_queues(8.into_weight()), 0.into_weight()); assert_ring(&[]); @@ -1208,7 +1197,7 @@ fn ready_but_perm_overweight_does_not_panic() { fn ready_ring_knit_basic_works() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { BookStateFor::::insert(Here, empty_book::()); for i in 0..10 { @@ -1228,12 +1217,15 @@ fn ready_ring_knit_basic_works() { fn ready_ring_knit_and_unknit_works() { use MessageOrigin::*; - new_test_ext::().execute_with(|| { + build_and_execute::(|| { // Place three queues into the storage. BookStateFor::::insert(Here, empty_book::()); BookStateFor::::insert(There, empty_book::()); BookStateFor::::insert(Everywhere(0), empty_book::()); + // Pausing should make no difference: + PausedQueues::set(vec![Here, There, Everywhere(0)]); + // Knit them into the ready ring. assert_ring(&[]); knit(&Here); @@ -1259,7 +1251,7 @@ fn enqueue_message_works() { let max_msg_per_page = ::HeapSize::get() as u64 / (ItemHeader::<::Size>::max_encoded_len() as u64 + 1); - new_test_ext::().execute_with(|| { + build_and_execute::(|| { // Enqueue messages which should fill three pages. 
let n = max_msg_per_page * 3; for i in 1..=n { @@ -1289,7 +1281,7 @@ fn enqueue_messages_works() { let max_msg_per_page = ::HeapSize::get() as u64 / (ItemHeader::<::Size>::max_encoded_len() as u64 + 1); - new_test_ext::().execute_with(|| { + build_and_execute::(|| { // Enqueue messages which should fill three pages. let n = max_msg_per_page * 3; let msgs = vec![msg("a"); n as usize]; @@ -1314,3 +1306,144 @@ fn enqueue_messages_works() { assert_eq!(book.count as usize, Pages::::iter().count()); }); } + +#[test] +fn service_queues_suspend_works() { + use MessageOrigin::*; + build_and_execute::(|| { + MessageQueue::enqueue_messages(vec![msg("a"), msg("ab"), msg("abc")].into_iter(), Here); + MessageQueue::enqueue_messages(vec![msg("x"), msg("xy"), msg("xyz")].into_iter(), There); + assert_eq!(QueueChanges::take(), vec![(Here, 3, 6), (There, 3, 6)]); + + // Pause `Here` - execution starts `There`. + PausedQueues::set(vec![Here]); + assert_eq!( + (true, false), + ( + ::QueuePausedQuery::is_paused(&Here), + ::QueuePausedQuery::is_paused(&There) + ) + ); + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("x"), There)]); + assert_eq!(QueueChanges::take(), vec![(There, 2, 5)]); + + // Unpause `Here` - execution continues `There`. + PausedQueues::take(); + assert_eq!( + (false, false), + ( + ::QueuePausedQuery::is_paused(&Here), + ::QueuePausedQuery::is_paused(&There) + ) + ); + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("xy"), There)]); + assert_eq!(QueueChanges::take(), vec![(There, 1, 3)]); + + // Now it swaps to `Here`. + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("a"), Here)]); + assert_eq!(QueueChanges::take(), vec![(Here, 2, 5)]); + + // Pause `There` - execution continues `Here`. 
+ PausedQueues::set(vec![There]); + assert_eq!( + (false, true), + ( + ::QueuePausedQuery::is_paused(&Here), + ::QueuePausedQuery::is_paused(&There) + ) + ); + assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("ab"), Here)]); + assert_eq!(QueueChanges::take(), vec![(Here, 1, 3)]); + + // Unpause `There` and service all remaining messages. + PausedQueues::take(); + assert_eq!( + (false, false), + ( + ::QueuePausedQuery::is_paused(&Here), + ::QueuePausedQuery::is_paused(&There) + ) + ); + assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("abc"), Here), (vmsg("xyz"), There)]); + assert_eq!(QueueChanges::take(), vec![(Here, 0, 0), (There, 0, 0)]); + }); +} + +/// Tests that manual overweight execution on a suspended queue errors with `QueueSuspended`. +#[test] +fn execute_overweight_respects_suspension() { + build_and_execute::(|| { + let origin = MessageOrigin::Here; + MessageQueue::enqueue_message(msg("weight=5"), origin); + // Mark the message as permanently overweight. + MessageQueue::service_queues(4.into_weight()); + assert_last_event::( + Event::OverweightEnqueued { + id: blake2_256(b"weight=5"), + origin, + message_index: 0, + page_index: 0, + } + .into(), + ); + PausedQueues::set(vec![origin]); + assert!(::QueuePausedQuery::is_paused(&origin)); + + // Execution should fail. + assert_eq!( + ::execute_overweight(Weight::MAX, (origin, 0, 0)), + Err(ExecuteOverweightError::QueuePaused) + ); + + PausedQueues::take(); + assert!(!::QueuePausedQuery::is_paused(&origin)); + + // Execution should work again with same args. 
+ assert_ok!(::execute_overweight( + Weight::MAX, + (origin, 0, 0) + )); + + assert_last_event::( + Event::Processed { + id: blake2_256(b"weight=5"), + origin, + weight_used: 5.into_weight(), + success: true, + } + .into(), + ); + }); +} + +#[test] +fn service_queue_suspension_ready_ring_works() { + build_and_execute::(|| { + let origin = MessageOrigin::Here; + PausedQueues::set(vec![origin]); + MessageQueue::enqueue_message(msg("weight=5"), origin); + + MessageQueue::service_queues(Weight::MAX); + // It did not execute but is in the ready ring. + assert!(System::events().is_empty(), "Paused"); + assert_ring(&[origin]); + + // Now when we un-pause, it will execute. + PausedQueues::take(); + MessageQueue::service_queues(Weight::MAX); + assert_last_event::( + Event::Processed { + id: blake2_256(b"weight=5"), + origin, + weight_used: 5.into_weight(), + success: true, + } + .into(), + ); + }); +} diff --git a/frame/message-queue/src/weights.rs b/frame/message-queue/src/weights.rs index 9dae12f518e44..e86f23e274ff2 100644 --- a/frame/message-queue/src/weights.rs +++ b/frame/message-queue/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_message_queue //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_message_queue +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_message_queue. pub trait WeightInfo { @@ -69,10 +73,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `233` + // Measured: `267` // Estimated: `6038` - // Minimum execution time: 12_076_000 picoseconds. - Weight::from_parts(12_350_000, 6038) + // Minimum execution time: 12_025_000 picoseconds. + Weight::from_parts(12_597_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -82,10 +86,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `233` + // Measured: `267` // Estimated: `6038` - // Minimum execution time: 11_586_000 picoseconds. - Weight::from_parts(11_912_000, 6038) + // Minimum execution time: 11_563_000 picoseconds. 
+ Weight::from_parts(11_785_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -93,10 +97,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn service_queue_base() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `76` // Estimated: `3514` - // Minimum execution time: 4_581_000 picoseconds. - Weight::from_parts(4_715_000, 3514) + // Minimum execution time: 4_467_000 picoseconds. + Weight::from_parts(4_655_000, 3514) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -104,10 +108,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `113` + // Measured: `147` // Estimated: `69049` - // Minimum execution time: 5_826_000 picoseconds. - Weight::from_parts(5_932_000, 69049) + // Minimum execution time: 6_103_000 picoseconds. + Weight::from_parts(6_254_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -115,10 +119,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `113` + // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_235_000 picoseconds. - Weight::from_parts(6_430_000, 69049) + // Minimum execution time: 6_320_000 picoseconds. 
+ Weight::from_parts(6_565_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -126,8 +130,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 53_860_000 picoseconds. - Weight::from_parts(53_984_000, 0) + // Minimum execution time: 66_062_000 picoseconds. + Weight::from_parts(66_371_000, 0) } /// Storage: MessageQueue ServiceHead (r:1 w:1) /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) @@ -135,10 +139,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `140` + // Measured: `174` // Estimated: `3514` - // Minimum execution time: 7_018_000 picoseconds. - Weight::from_parts(7_205_000, 3514) + // Minimum execution time: 6_788_000 picoseconds. + Weight::from_parts(7_176_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -148,10 +152,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65710` + // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 53_485_000 picoseconds. - Weight::from_parts(54_154_000, 69049) + // Minimum execution time: 52_865_000 picoseconds. 
+ Weight::from_parts(54_398_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -161,10 +165,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65710` + // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 68_830_000 picoseconds. - Weight::from_parts(69_487_000, 69049) + // Minimum execution time: 69_168_000 picoseconds. + Weight::from_parts(70_560_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -174,10 +178,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65710` + // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 81_643_000 picoseconds. - Weight::from_parts(82_399_000, 69049) + // Minimum execution time: 80_947_000 picoseconds. + Weight::from_parts(82_715_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -191,10 +195,10 @@ impl WeightInfo for () { /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `233` + // Measured: `267` // Estimated: `6038` - // Minimum execution time: 12_076_000 picoseconds. - Weight::from_parts(12_350_000, 6038) + // Minimum execution time: 12_025_000 picoseconds. 
+ Weight::from_parts(12_597_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -204,10 +208,10 @@ impl WeightInfo for () { /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `233` + // Measured: `267` // Estimated: `6038` - // Minimum execution time: 11_586_000 picoseconds. - Weight::from_parts(11_912_000, 6038) + // Minimum execution time: 11_563_000 picoseconds. + Weight::from_parts(11_785_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -215,10 +219,10 @@ impl WeightInfo for () { /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn service_queue_base() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `76` // Estimated: `3514` - // Minimum execution time: 4_581_000 picoseconds. - Weight::from_parts(4_715_000, 3514) + // Minimum execution time: 4_467_000 picoseconds. + Weight::from_parts(4_655_000, 3514) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -226,10 +230,10 @@ impl WeightInfo for () { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `113` + // Measured: `147` // Estimated: `69049` - // Minimum execution time: 5_826_000 picoseconds. - Weight::from_parts(5_932_000, 69049) + // Minimum execution time: 6_103_000 picoseconds. 
+ Weight::from_parts(6_254_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -237,10 +241,10 @@ impl WeightInfo for () { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `113` + // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_235_000 picoseconds. - Weight::from_parts(6_430_000, 69049) + // Minimum execution time: 6_320_000 picoseconds. + Weight::from_parts(6_565_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -248,8 +252,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 53_860_000 picoseconds. - Weight::from_parts(53_984_000, 0) + // Minimum execution time: 66_062_000 picoseconds. + Weight::from_parts(66_371_000, 0) } /// Storage: MessageQueue ServiceHead (r:1 w:1) /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) @@ -257,10 +261,10 @@ impl WeightInfo for () { /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `140` + // Measured: `174` // Estimated: `3514` - // Minimum execution time: 7_018_000 picoseconds. - Weight::from_parts(7_205_000, 3514) + // Minimum execution time: 6_788_000 picoseconds. 
+ Weight::from_parts(7_176_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -270,10 +274,10 @@ impl WeightInfo for () { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65710` + // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 53_485_000 picoseconds. - Weight::from_parts(54_154_000, 69049) + // Minimum execution time: 52_865_000 picoseconds. + Weight::from_parts(54_398_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -283,10 +287,10 @@ impl WeightInfo for () { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65710` + // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 68_830_000 picoseconds. - Weight::from_parts(69_487_000, 69049) + // Minimum execution time: 69_168_000 picoseconds. + Weight::from_parts(70_560_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -296,10 +300,10 @@ impl WeightInfo for () { /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65710` + // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 81_643_000 picoseconds. - Weight::from_parts(82_399_000, 69049) + // Minimum execution time: 80_947_000 picoseconds. 
+ Weight::from_parts(82_715_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index 98a15c5a5aada..07e13e2238695 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -13,21 +13,20 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } # third party log = { version = "0.4.17", default-features = false } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } [features] default = ["std"] @@ -40,10 +39,18 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + 
"pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index 64058be9c8fbf..ab117315f8985 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -59,7 +59,7 @@ use frame_support::{ weights::Weight, BoundedVec, RuntimeDebug, }; -use frame_system::{self as system, RawOrigin}; +use frame_system::{self as system, pallet_prelude::BlockNumberFor, RawOrigin}; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::{ @@ -183,7 +183,7 @@ pub mod pallet { T::AccountId, Blake2_128Concat, [u8; 32], - Multisig, T::AccountId, T::MaxSignatories>, + Multisig, BalanceOf, T::AccountId, T::MaxSignatories>, >; #[pallet::error] @@ -226,14 +226,14 @@ pub mod pallet { /// A multisig operation has been approved by someone. MultisigApproval { approving: T::AccountId, - timepoint: Timepoint, + timepoint: Timepoint>, multisig: T::AccountId, call_hash: CallHash, }, /// A multisig operation has been executed. MultisigExecuted { approving: T::AccountId, - timepoint: Timepoint, + timepoint: Timepoint>, multisig: T::AccountId, call_hash: CallHash, result: DispatchResult, @@ -241,7 +241,7 @@ pub mod pallet { /// A multisig operation has been cancelled. 
MultisigCancelled { cancelling: T::AccountId, - timepoint: Timepoint, + timepoint: Timepoint>, multisig: T::AccountId, call_hash: CallHash, }, @@ -366,7 +366,7 @@ pub mod pallet { origin: OriginFor, threshold: u16, other_signatories: Vec, - maybe_timepoint: Option>, + maybe_timepoint: Option>>, call: Box<::RuntimeCall>, max_weight: Weight, ) -> DispatchResultWithPostInfo { @@ -423,7 +423,7 @@ pub mod pallet { origin: OriginFor, threshold: u16, other_signatories: Vec, - maybe_timepoint: Option>, + maybe_timepoint: Option>>, call_hash: [u8; 32], max_weight: Weight, ) -> DispatchResultWithPostInfo { @@ -465,7 +465,7 @@ pub mod pallet { origin: OriginFor, threshold: u16, other_signatories: Vec, - timepoint: Timepoint, + timepoint: Timepoint>, call_hash: [u8; 32], ) -> DispatchResult { let who = ensure_signed(origin)?; @@ -511,7 +511,7 @@ impl Pallet { who: T::AccountId, threshold: u16, other_signatories: Vec, - maybe_timepoint: Option>, + maybe_timepoint: Option>>, call_or_hash: CallOrHash, max_weight: Weight, ) -> DispatchResultWithPostInfo { @@ -637,7 +637,7 @@ impl Pallet { } /// The current `Timepoint`. 
- pub fn timepoint() -> Timepoint { + pub fn timepoint() -> Timepoint> { Timepoint { height: >::block_number(), index: >::extrinsic_index().unwrap_or_default(), diff --git a/frame/multisig/src/migrations.rs b/frame/multisig/src/migrations.rs index 2a9c858a5552f..298e73c5d7576 100644 --- a/frame/multisig/src/migrations.rs +++ b/frame/multisig/src/migrations.rs @@ -43,7 +43,7 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { let onchain = Pallet::::on_chain_storage_version(); ensure!(onchain < 1, "this migration can be deleted"); @@ -72,7 +72,7 @@ pub mod v1 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { let onchain = Pallet::::on_chain_storage_version(); ensure!(onchain < 2, "this migration needs to be removed"); ensure!(onchain == 1, "this migration needs to be run"); diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index 7e7f1668026a2..e7fc5b3e4aaea 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -23,72 +23,44 @@ use super::*; use crate as pallet_multisig; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, derive_impl, traits::{ConstU32, ConstU64, Contains}, }; -use sp_core::H256; -use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - TokenError, -}; +use sp_runtime::{BuildStorage, TokenError}; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; +type Block = frame_system::mocking::MockBlockU32; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: 
frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Multisig: pallet_multisig::{Pallet, Call, Storage, Event}, } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = TestBaseCallFilter; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); + type Block = Block; + type BlockHashCount = ConstU32<250>; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; type RuntimeCall = RuntimeCall; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); + type BaseCallFilter = TestBaseCallFilter; type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); type OnSetCode = (); - type MaxConsumers = ConstU32<16>; + + type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = (); + type ReserveIdentifier = [u8; 8]; type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type HoldIdentifier = (); - type MaxHolds = (); + type ExistentialDeposit = ConstU64<1>; } pub struct TestBaseCallFilter; @@ -115,7 +87,7 @@ impl Config for Test { use pallet_balances::Call as BalancesCall; pub fn new_test_ext() -> 
sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)], } @@ -126,7 +98,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -fn now() -> Timepoint { +fn now() -> Timepoint { Multisig::timepoint() } diff --git a/frame/multisig/src/weights.rs b/frame/multisig/src/weights.rs index 7fda4bec8352d..7b87d258d383d 100644 --- a/frame/multisig/src/weights.rs +++ b/frame/multisig/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_multisig //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_multisig +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_multisig. pub trait WeightInfo { @@ -65,10 +69,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_199_000 picoseconds. 
- Weight::from_parts(12_595_771, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(490, 0).saturating_mul(z.into())) + // Minimum execution time: 13_452_000 picoseconds. + Weight::from_parts(14_425_869, 0) + // Standard Error: 4 + .saturating_add(Weight::from_parts(493, 0).saturating_mul(z.into())) } /// Storage: Multisig Multisigs (r:1 w:1) /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) @@ -78,12 +82,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `301 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 42_810_000 picoseconds. - Weight::from_parts(37_500_997, 6811) - // Standard Error: 308 - .saturating_add(Weight::from_parts(59_961, 0).saturating_mul(s.into())) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_198, 0).saturating_mul(z.into())) + // Minimum execution time: 46_012_000 picoseconds. + Weight::from_parts(34_797_344, 6811) + // Standard Error: 833 + .saturating_add(Weight::from_parts(127_671, 0).saturating_mul(s.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_498, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -95,12 +99,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `320` // Estimated: `6811` - // Minimum execution time: 27_775_000 picoseconds. - Weight::from_parts(22_868_524, 6811) - // Standard Error: 273 - .saturating_add(Weight::from_parts(55_219, 0).saturating_mul(s.into())) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(z.into())) + // Minimum execution time: 29_834_000 picoseconds. 
+ Weight::from_parts(20_189_154, 6811) + // Standard Error: 637 + .saturating_add(Weight::from_parts(110_080, 0).saturating_mul(s.into())) + // Standard Error: 6 + .saturating_add(Weight::from_parts(1_483, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -114,12 +118,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `426 + s * (33 ±0)` // Estimated: `6811` - // Minimum execution time: 48_223_000 picoseconds. - Weight::from_parts(39_193_453, 6811) - // Standard Error: 2_162 - .saturating_add(Weight::from_parts(93_763, 0).saturating_mul(s.into())) - // Standard Error: 21 - .saturating_add(Weight::from_parts(1_372, 0).saturating_mul(z.into())) + // Minimum execution time: 51_464_000 picoseconds. + Weight::from_parts(39_246_644, 6811) + // Standard Error: 1_251 + .saturating_add(Weight::from_parts(143_313, 0).saturating_mul(s.into())) + // Standard Error: 12 + .saturating_add(Weight::from_parts(1_523, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -130,10 +134,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `301 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 34_775_000 picoseconds. - Weight::from_parts(35_966_626, 6811) - // Standard Error: 464 - .saturating_add(Weight::from_parts(61_492, 0).saturating_mul(s.into())) + // Minimum execution time: 33_275_000 picoseconds. + Weight::from_parts(34_073_221, 6811) + // Standard Error: 1_163 + .saturating_add(Weight::from_parts(124_815, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -144,10 +148,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `320` // Estimated: `6811` - // Minimum execution time: 19_947_000 picoseconds. 
- Weight::from_parts(21_253_025, 6811) - // Standard Error: 402 - .saturating_add(Weight::from_parts(58_491, 0).saturating_mul(s.into())) + // Minimum execution time: 18_411_000 picoseconds. + Weight::from_parts(19_431_787, 6811) + // Standard Error: 694 + .saturating_add(Weight::from_parts(107_220, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -158,10 +162,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `492 + s * (1 ±0)` // Estimated: `6811` - // Minimum execution time: 35_023_000 picoseconds. - Weight::from_parts(36_756_977, 6811) - // Standard Error: 547 - .saturating_add(Weight::from_parts(62_235, 0).saturating_mul(s.into())) + // Minimum execution time: 33_985_000 picoseconds. + Weight::from_parts(35_547_970, 6811) + // Standard Error: 1_135 + .saturating_add(Weight::from_parts(116_537, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -174,10 +178,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_199_000 picoseconds. - Weight::from_parts(12_595_771, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(490, 0).saturating_mul(z.into())) + // Minimum execution time: 13_452_000 picoseconds. + Weight::from_parts(14_425_869, 0) + // Standard Error: 4 + .saturating_add(Weight::from_parts(493, 0).saturating_mul(z.into())) } /// Storage: Multisig Multisigs (r:1 w:1) /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) @@ -187,12 +191,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `301 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 42_810_000 picoseconds. 
- Weight::from_parts(37_500_997, 6811) - // Standard Error: 308 - .saturating_add(Weight::from_parts(59_961, 0).saturating_mul(s.into())) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_198, 0).saturating_mul(z.into())) + // Minimum execution time: 46_012_000 picoseconds. + Weight::from_parts(34_797_344, 6811) + // Standard Error: 833 + .saturating_add(Weight::from_parts(127_671, 0).saturating_mul(s.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_498, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -204,12 +208,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `320` // Estimated: `6811` - // Minimum execution time: 27_775_000 picoseconds. - Weight::from_parts(22_868_524, 6811) - // Standard Error: 273 - .saturating_add(Weight::from_parts(55_219, 0).saturating_mul(s.into())) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(z.into())) + // Minimum execution time: 29_834_000 picoseconds. + Weight::from_parts(20_189_154, 6811) + // Standard Error: 637 + .saturating_add(Weight::from_parts(110_080, 0).saturating_mul(s.into())) + // Standard Error: 6 + .saturating_add(Weight::from_parts(1_483, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -223,12 +227,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `426 + s * (33 ±0)` // Estimated: `6811` - // Minimum execution time: 48_223_000 picoseconds. - Weight::from_parts(39_193_453, 6811) - // Standard Error: 2_162 - .saturating_add(Weight::from_parts(93_763, 0).saturating_mul(s.into())) - // Standard Error: 21 - .saturating_add(Weight::from_parts(1_372, 0).saturating_mul(z.into())) + // Minimum execution time: 51_464_000 picoseconds. 
+ Weight::from_parts(39_246_644, 6811) + // Standard Error: 1_251 + .saturating_add(Weight::from_parts(143_313, 0).saturating_mul(s.into())) + // Standard Error: 12 + .saturating_add(Weight::from_parts(1_523, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -239,10 +243,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `301 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 34_775_000 picoseconds. - Weight::from_parts(35_966_626, 6811) - // Standard Error: 464 - .saturating_add(Weight::from_parts(61_492, 0).saturating_mul(s.into())) + // Minimum execution time: 33_275_000 picoseconds. + Weight::from_parts(34_073_221, 6811) + // Standard Error: 1_163 + .saturating_add(Weight::from_parts(124_815, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -253,10 +257,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `320` // Estimated: `6811` - // Minimum execution time: 19_947_000 picoseconds. - Weight::from_parts(21_253_025, 6811) - // Standard Error: 402 - .saturating_add(Weight::from_parts(58_491, 0).saturating_mul(s.into())) + // Minimum execution time: 18_411_000 picoseconds. + Weight::from_parts(19_431_787, 6811) + // Standard Error: 694 + .saturating_add(Weight::from_parts(107_220, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -267,10 +271,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `492 + s * (1 ±0)` // Estimated: `6811` - // Minimum execution time: 35_023_000 picoseconds. - Weight::from_parts(36_756_977, 6811) - // Standard Error: 547 - .saturating_add(Weight::from_parts(62_235, 0).saturating_mul(s.into())) + // Minimum execution time: 33_985_000 picoseconds. 
+ Weight::from_parts(35_547_970, 6811) + // Standard Error: 1_135 + .saturating_add(Weight::from_parts(116_537, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/nft-fractionalization/Cargo.toml b/frame/nft-fractionalization/Cargo.toml new file mode 100644 index 0000000000000..8f706a1e00ae7 --- /dev/null +++ b/frame/nft-fractionalization/Cargo.toml @@ -0,0 +1,66 @@ +[package] +name = "pallet-nft-fractionalization" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet to convert non-fungible to fungible tokens." +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +log = { version = "0.4.17", default-features = false } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +pallet-assets = { version = "4.0.0-dev", default-features = false, path = "../assets" } +pallet-nfts = { version = "4.0.0-dev", default-features = false, path = "../nfts" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } + +[dev-dependencies] +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" 
} +sp-std = { version = "8.0.0", path = "../../primitives/std" } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-assets/std", + "pallet-nfts/std", + "scale-info/std", + "sp-runtime/std", + "sp-std/std", + "pallet-balances/std", + "sp-core/std", + "sp-io/std" +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-nfts/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-assets/try-runtime", + "pallet-balances/try-runtime", + "pallet-nfts/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/nft-fractionalization/README.md b/frame/nft-fractionalization/README.md new file mode 100644 index 0000000000000..180eef22cc46f --- /dev/null +++ b/frame/nft-fractionalization/README.md @@ -0,0 +1,6 @@ +### Lock NFT + +Lock an NFT from `pallet-nfts` and mint fungible assets from `pallet-assets`. + +The NFT gets locked by putting a system-level attribute named `Locked`. This prevents the NFT from being transferred further. +The NFT becomes unlocked when the `Locked` attribute is removed. In order to unify the fungible asset and unlock the NFT, an account must hold the full issuance of the asset the NFT was fractionalised into. Holding less of the fungible asset will not allow the unlocking of the NFT. diff --git a/frame/nft-fractionalization/src/benchmarking.rs b/frame/nft-fractionalization/src/benchmarking.rs new file mode 100644 index 0000000000000..0b54acdab49ea --- /dev/null +++ b/frame/nft-fractionalization/src/benchmarking.rs @@ -0,0 +1,132 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Nft fractionalization pallet benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use frame_benchmarking::{benchmarks, whitelisted_caller}; +use frame_support::{ + assert_ok, + traits::{ + fungible::{Inspect as InspectFungible, Mutate as MutateFungible}, + tokens::nonfungibles_v2::{Create, Mutate}, + Get, + }, +}; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin as SystemOrigin}; +use pallet_nfts::{CollectionConfig, CollectionSettings, ItemConfig, MintSettings}; +use sp_runtime::traits::StaticLookup; +use sp_std::prelude::*; + +use crate::Pallet as NftFractionalization; + +type BalanceOf = + <::Currency as InspectFungible<::AccountId>>::Balance; + +type CollectionConfigOf = + CollectionConfig, BlockNumberFor, ::NftCollectionId>; + +fn default_collection_config() -> CollectionConfigOf +where + T::Currency: InspectFungible, +{ + CollectionConfig { + settings: CollectionSettings::all_enabled(), + max_supply: None, + mint_settings: MintSettings::default(), + } +} + +fn mint_nft(nft_id: T::NftId) -> (T::AccountId, AccountIdLookupOf) +where + T::Nfts: Create, BlockNumberFor, T::NftCollectionId>> + + Mutate, +{ + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let ed = T::Currency::minimum_balance(); + let multiplier = BalanceOf::::from(100u8); + 
T::Currency::set_balance(&caller, ed * multiplier + T::Deposit::get() * multiplier); + + assert_ok!(T::Nfts::create_collection(&caller, &caller, &default_collection_config::())); + let collection = T::BenchmarkHelper::collection(0); + assert_ok!(T::Nfts::mint_into(&collection, &nft_id, &caller, &ItemConfig::default(), true)); + (caller, caller_lookup) +} + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +benchmarks! { + where_clause { + where + T::Nfts: Create, frame_system::pallet_prelude::BlockNumberFor::, T::NftCollectionId>> + + Mutate, + } + + fractionalize { + let asset = T::BenchmarkHelper::asset(0); + let collection = T::BenchmarkHelper::collection(0); + let nft = T::BenchmarkHelper::nft(0); + let (caller, caller_lookup) = mint_nft::(nft); + }: _(SystemOrigin::Signed(caller.clone()), collection, nft, asset.clone(), caller_lookup, 1000u32.into()) + verify { + assert_last_event::( + Event::NftFractionalized { + nft_collection: collection, + nft, + fractions: 1000u32.into(), + asset, + beneficiary: caller, + }.into() + ); + } + + unify { + let asset = T::BenchmarkHelper::asset(0); + let collection = T::BenchmarkHelper::collection(0); + let nft = T::BenchmarkHelper::nft(0); + let (caller, caller_lookup) = mint_nft::(nft); + NftFractionalization::::fractionalize( + SystemOrigin::Signed(caller.clone()).into(), + collection, + nft, + asset.clone(), + caller_lookup.clone(), + 1000u32.into(), + )?; + }: _(SystemOrigin::Signed(caller.clone()), collection, nft, asset.clone(), caller_lookup) + verify { + assert_last_event::( + Event::NftUnified { + nft_collection: collection, + nft, + asset, + beneficiary: caller, + }.into() + ); + } + + impl_benchmark_test_suite!(NftFractionalization, 
crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/nft-fractionalization/src/lib.rs b/frame/nft-fractionalization/src/lib.rs new file mode 100644 index 0000000000000..b1663e95d855d --- /dev/null +++ b/frame/nft-fractionalization/src/lib.rs @@ -0,0 +1,407 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # NFT Fractionalization Pallet +//! +//! This pallet provides the basic functionality that should allow users +//! to leverage partial ownership, transfers, and sales, of illiquid assets, +//! whether real-world assets represented by their digital twins, or NFTs, +//! or original NFTs. +//! +//! The functionality allows a user to lock an NFT they own, create a new +//! fungible asset, and mint a set amount of tokens (`fractions`). +//! +//! It also allows the user to burn 100% of the asset and to unlock the NFT +//! into their account. +//! +//! ### Functions +//! +//! * `fractionalize`: Lock the NFT and create and mint a new fungible asset. +//! * `unify`: Return 100% of the asset and unlock the NFT. + +// Ensure we're `no_std` when compiling for Wasm. 
+#![cfg_attr(not(feature = "std"), no_std)] + +mod types; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(test)] +pub mod mock; +#[cfg(test)] +mod tests; + +pub mod weights; + +use frame_system::Config as SystemConfig; +pub use pallet::*; +pub use scale_info::Type; +pub use types::*; +pub use weights::WeightInfo; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{ + dispatch::DispatchResult, + ensure, + pallet_prelude::*, + sp_runtime::traits::{AccountIdConversion, StaticLookup}, + traits::{ + fungible::{ + hold::Mutate as HoldMutateFungible, Inspect as InspectFungible, + Mutate as MutateFungible, + }, + fungibles::{ + metadata::{MetadataDeposit, Mutate as MutateMetadata}, + Create, Destroy, Inspect, Mutate, + }, + tokens::{ + nonfungibles_v2::{Inspect as NonFungiblesInspect, Transfer}, + AssetId, Balance as AssetBalance, + Fortitude::Polite, + Precision::{BestEffort, Exact}, + Preservation::Preserve, + }, + }, + BoundedVec, PalletId, + }; + use frame_system::pallet_prelude::*; + use scale_info::prelude::{format, string::String}; + use sp_runtime::traits::{One, Zero}; + use sp_std::{fmt::Display, prelude::*}; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The currency mechanism, used for paying for deposits. + type Currency: InspectFungible + + MutateFungible + + HoldMutateFungible; + + /// Overarching hold reason. + type RuntimeHoldReason: From; + + /// The deposit paid by the user locking an NFT. The deposit is returned to the original NFT + /// owner when the asset is unified and the NFT is unlocked. + #[pallet::constant] + type Deposit: Get>; + + /// Identifier for the collection of NFT. + type NftCollectionId: Member + Parameter + MaxEncodedLen + Copy + Display; + + /// The type used to identify an NFT within a collection. 
+ type NftId: Member + Parameter + MaxEncodedLen + Copy + Display; + + /// The type used to describe the amount of fractions converted into assets. + type AssetBalance: AssetBalance; + + /// The type used to identify the assets created during fractionalization. + type AssetId: AssetId; + + /// Registry for the minted assets. + type Assets: Inspect + + Create + + Destroy + + Mutate + + MutateMetadata + + MetadataDeposit>; + + /// Registry for minted NFTs. + type Nfts: NonFungiblesInspect< + Self::AccountId, + ItemId = Self::NftId, + CollectionId = Self::NftCollectionId, + > + Transfer; + + /// The pallet's id, used for deriving its sovereign account ID. + #[pallet::constant] + type PalletId: Get; + + /// The newly created asset's symbol. + #[pallet::constant] + type NewAssetSymbol: Get>; + + /// The newly created asset's name. + #[pallet::constant] + type NewAssetName: Get>; + + /// The maximum length of a name or symbol stored on-chain. + #[pallet::constant] + type StringLimit: Get; + + /// A set of helper functions for benchmarking. + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper: BenchmarkHelper; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + /// Keeps track of the corresponding NFT ID, asset ID and amount minted. + #[pallet::storage] + #[pallet::getter(fn nft_to_asset)] + pub type NftToAsset = StorageMap< + _, + Blake2_128Concat, + (T::NftCollectionId, T::NftId), + Details, AssetBalanceOf, DepositOf, T::AccountId>, + OptionQuery, + >; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// An NFT was successfully fractionalized. + NftFractionalized { + nft_collection: T::NftCollectionId, + nft: T::NftId, + fractions: AssetBalanceOf, + asset: AssetIdOf, + beneficiary: T::AccountId, + }, + /// An NFT was successfully returned back. 
+ NftUnified { + nft_collection: T::NftCollectionId, + nft: T::NftId, + asset: AssetIdOf, + beneficiary: T::AccountId, + }, + } + + #[pallet::error] + pub enum Error { + /// Asset ID does not correspond to locked NFT. + IncorrectAssetId, + /// The signing account has no permission to do the operation. + NoPermission, + /// NFT doesn't exist. + NftNotFound, + /// NFT has not yet been fractionalised. + NftNotFractionalized, + } + + /// A reason for the pallet placing a hold on funds. + #[pallet::composite_enum] + pub enum HoldReason { + /// Reserved for a fractionalized NFT. + #[codec(index = 0)] + Fractionalized, + } + + #[pallet::call] + impl Pallet { + /// Lock the NFT and mint a new fungible asset. + /// + /// The dispatch origin for this call must be Signed. + /// The origin must be the owner of the NFT they are trying to lock. + /// + /// `Deposit` funds of sender are reserved. + /// + /// - `nft_collection_id`: The ID used to identify the collection of the NFT. + /// Is used within the context of `pallet_nfts`. + /// - `nft_id`: The ID used to identify the NFT within the given collection. + /// Is used within the context of `pallet_nfts`. + /// - `asset_id`: The ID of the new asset. It must not exist. + /// Is used within the context of `pallet_assets`. + /// - `beneficiary`: The account that will receive the newly created asset. + /// - `fractions`: The total issuance of the newly created asset class. + /// + /// Emits `NftFractionalized` event when successful. 
+ #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::fractionalize())] + pub fn fractionalize( + origin: OriginFor, + nft_collection_id: T::NftCollectionId, + nft_id: T::NftId, + asset_id: AssetIdOf, + beneficiary: AccountIdLookupOf, + fractions: AssetBalanceOf, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let beneficiary = T::Lookup::lookup(beneficiary)?; + + let nft_owner = + T::Nfts::owner(&nft_collection_id, &nft_id).ok_or(Error::::NftNotFound)?; + ensure!(nft_owner == who, Error::::NoPermission); + + let pallet_account = Self::get_pallet_account(); + let deposit = T::Deposit::get(); + T::Currency::hold(&HoldReason::Fractionalized.into(), &nft_owner, deposit)?; + Self::do_lock_nft(nft_collection_id, nft_id)?; + Self::do_create_asset(asset_id.clone(), pallet_account.clone())?; + Self::do_mint_asset(asset_id.clone(), &beneficiary, fractions)?; + Self::do_set_metadata( + asset_id.clone(), + &who, + &pallet_account, + &nft_collection_id, + &nft_id, + )?; + + NftToAsset::::insert( + (nft_collection_id, nft_id), + Details { asset: asset_id.clone(), fractions, asset_creator: nft_owner, deposit }, + ); + + Self::deposit_event(Event::NftFractionalized { + nft_collection: nft_collection_id, + nft: nft_id, + fractions, + asset: asset_id, + beneficiary, + }); + + Ok(()) + } + + /// Burn the total issuance of the fungible asset and return (unlock) the locked NFT. + /// + /// The dispatch origin for this call must be Signed. + /// + /// `Deposit` funds will be returned to `asset_creator`. + /// + /// - `nft_collection_id`: The ID used to identify the collection of the NFT. + /// Is used within the context of `pallet_nfts`. + /// - `nft_id`: The ID used to identify the NFT within the given collection. + /// Is used within the context of `pallet_nfts`. + /// - `asset_id`: The ID of the asset being returned and destroyed. Must match + /// the original ID of the created asset, corresponding to the NFT. + /// Is used within the context of `pallet_assets`. 
+ /// - `beneficiary`: The account that will receive the unified NFT. + /// + /// Emits `NftUnified` event when successful. + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::unify())] + pub fn unify( + origin: OriginFor, + nft_collection_id: T::NftCollectionId, + nft_id: T::NftId, + asset_id: AssetIdOf, + beneficiary: AccountIdLookupOf, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let beneficiary = T::Lookup::lookup(beneficiary)?; + + NftToAsset::::try_mutate_exists((nft_collection_id, nft_id), |maybe_details| { + let details = maybe_details.take().ok_or(Error::::NftNotFractionalized)?; + ensure!(details.asset == asset_id, Error::::IncorrectAssetId); + + let deposit = details.deposit; + let asset_creator = details.asset_creator; + Self::do_burn_asset(asset_id.clone(), &who, details.fractions)?; + Self::do_unlock_nft(nft_collection_id, nft_id, &beneficiary)?; + T::Currency::release( + &HoldReason::Fractionalized.into(), + &asset_creator, + deposit, + BestEffort, + )?; + + Self::deposit_event(Event::NftUnified { + nft_collection: nft_collection_id, + nft: nft_id, + asset: asset_id, + beneficiary, + }); + + Ok(()) + }) + } + } + + impl Pallet { + /// The account ID of the pallet. + /// + /// This actually does computation. If you need to keep using it, then make sure you cache + /// the value and only call this once. + fn get_pallet_account() -> T::AccountId { + T::PalletId::get().into_account_truncating() + } + + /// Prevent further transferring of NFT. + fn do_lock_nft(nft_collection_id: T::NftCollectionId, nft_id: T::NftId) -> DispatchResult { + T::Nfts::disable_transfer(&nft_collection_id, &nft_id) + } + + /// Remove the transfer lock and transfer the NFT to the account returning the tokens. 
+ fn do_unlock_nft( + nft_collection_id: T::NftCollectionId, + nft_id: T::NftId, + account: &T::AccountId, + ) -> DispatchResult { + T::Nfts::enable_transfer(&nft_collection_id, &nft_id)?; + T::Nfts::transfer(&nft_collection_id, &nft_id, account) + } + + /// Create the new asset. + fn do_create_asset(asset_id: AssetIdOf, admin: T::AccountId) -> DispatchResult { + T::Assets::create(asset_id, admin, false, One::one()) + } + + /// Mint the `amount` of tokens with `asset_id` into the beneficiary's account. + fn do_mint_asset( + asset_id: AssetIdOf, + beneficiary: &T::AccountId, + amount: AssetBalanceOf, + ) -> DispatchResult { + T::Assets::mint_into(asset_id, beneficiary, amount)?; + Ok(()) + } + + /// Burn tokens from the account. + fn do_burn_asset( + asset_id: AssetIdOf, + account: &T::AccountId, + amount: AssetBalanceOf, + ) -> DispatchResult { + T::Assets::burn_from(asset_id.clone(), account, amount, Exact, Polite)?; + T::Assets::start_destroy(asset_id, None) + } + + /// Set the metadata for the newly created asset. 
+ fn do_set_metadata( + asset_id: AssetIdOf, + depositor: &T::AccountId, + pallet_account: &T::AccountId, + nft_collection_id: &T::NftCollectionId, + nft_id: &T::NftId, + ) -> DispatchResult { + let name = format!( + "{} {nft_collection_id}-{nft_id}", + String::from_utf8_lossy(&T::NewAssetName::get()) + ); + let symbol: &[u8] = &T::NewAssetSymbol::get(); + let existential_deposit = T::Currency::minimum_balance(); + let pallet_account_balance = T::Currency::balance(&pallet_account); + + if pallet_account_balance < existential_deposit { + T::Currency::transfer(&depositor, &pallet_account, existential_deposit, Preserve)?; + } + let metadata_deposit = T::Assets::calc_metadata_deposit(name.as_bytes(), symbol); + if !metadata_deposit.is_zero() { + T::Currency::transfer(&depositor, &pallet_account, metadata_deposit, Preserve)?; + } + T::Assets::set(asset_id, &pallet_account, name.into(), symbol.into(), 0) + } + } +} diff --git a/frame/nft-fractionalization/src/mock.rs b/frame/nft-fractionalization/src/mock.rs new file mode 100644 index 0000000000000..6565adaf6fc7e --- /dev/null +++ b/frame/nft-fractionalization/src/mock.rs @@ -0,0 +1,186 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test environment for Nft fractionalization pallet. 
+ +use super::*; +use crate as pallet_nft_fractionalization; + +use frame_support::{ + construct_runtime, parameter_types, + traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, + BoundedVec, PalletId, +}; +use frame_system::EnsureSigned; +use pallet_nfts::PalletFeatures; +use sp_core::H256; +use sp_runtime::{ + traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, + BuildStorage, MultiSignature, +}; + +type Block = frame_system::mocking::MockBlock; +type Signature = MultiSignature; +type AccountPublic = ::Signer; +type AccountId = ::AccountId; + +// Configure a mock runtime to test the pallet. +construct_runtime!( + pub enum Test + { + System: frame_system, + NftFractionalization: pallet_nft_fractionalization, + Assets: pallet_assets, + Balances: pallet_balances, + Nfts: pallet_nfts, + } +); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type Balance = u64; + type DustRemoval = (); + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type MaxLocks = (); + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; + type RuntimeHoldReason = RuntimeHoldReason; + type MaxHolds = ConstU32<1>; + type FreezeIdentifier = (); + type MaxFreezes 
= (); +} + +impl pallet_assets::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = u64; + type RemoveItemsLimit = ConstU32<1000>; + type AssetId = u32; + type AssetIdParameter = u32; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type AssetDeposit = ConstU64<1>; + type AssetAccountDeposit = ConstU64<10>; + type MetadataDepositBase = ConstU64<1>; + type MetadataDepositPerByte = ConstU64<1>; + type ApprovalDeposit = ConstU64<1>; + type StringLimit = ConstU32<50>; + type Freezer = (); + type Extra = (); + type CallbackHandle = (); + type WeightInfo = (); + pallet_assets::runtime_benchmarks_enabled! { + type BenchmarkHelper = (); + } +} + +parameter_types! { + pub storage Features: PalletFeatures = PalletFeatures::all_enabled(); +} + +impl pallet_nfts::Config for Test { + type RuntimeEvent = RuntimeEvent; + type CollectionId = u32; + type ItemId = u32; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type Locker = (); + type CollectionDeposit = ConstU64<2>; + type ItemDeposit = ConstU64<1>; + type MetadataDepositBase = ConstU64<1>; + type AttributeDepositBase = ConstU64<1>; + type DepositPerByte = ConstU64<1>; + type StringLimit = ConstU32<50>; + type KeyLimit = ConstU32<50>; + type ValueLimit = ConstU32<50>; + type ApprovalsLimit = ConstU32<10>; + type ItemAttributesApprovalsLimit = ConstU32<2>; + type MaxTips = ConstU32<10>; + type MaxDeadlineDuration = ConstU64<10000>; + type MaxAttributesPerCall = ConstU32<2>; + type Features = Features; + type OffchainSignature = Signature; + type OffchainPublic = AccountPublic; + type WeightInfo = (); + pallet_nfts::runtime_benchmarks_enabled! { + type Helper = (); + } +} + +parameter_types! 
{ + pub const StringLimit: u32 = 50; + pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); + pub NewAssetSymbol: BoundedVec = (*b"FRAC").to_vec().try_into().unwrap(); + pub NewAssetName: BoundedVec = (*b"Frac").to_vec().try_into().unwrap(); +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type Deposit = ConstU64<1>; + type Currency = Balances; + type NewAssetSymbol = NewAssetSymbol; + type NewAssetName = NewAssetName; + type NftCollectionId = ::CollectionId; + type NftId = ::ItemId; + type AssetBalance = ::Balance; + type AssetId = ::AssetId; + type Assets = Assets; + type Nfts = Nfts; + type PalletId = NftFractionalizationPalletId; + type WeightInfo = (); + type StringLimit = StringLimit; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); + type RuntimeHoldReason = RuntimeHoldReason; +} + +// Build genesis storage according to the mock runtime. +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} diff --git a/frame/nft-fractionalization/src/tests.rs b/frame/nft-fractionalization/src/tests.rs new file mode 100644 index 0000000000000..b82402bda1e67 --- /dev/null +++ b/frame/nft-fractionalization/src/tests.rs @@ -0,0 +1,305 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for Nft fractionalization pallet. + +use crate::{mock::*, *}; +use frame_support::{ + assert_noop, assert_ok, + traits::{ + fungible::{hold::Inspect as InspectHold, Mutate as MutateFungible}, + fungibles::{metadata::Inspect, InspectEnumerable}, + }, +}; +use pallet_nfts::CollectionConfig; +use sp_runtime::{DispatchError, ModuleError, TokenError::FundsUnavailable}; + +fn assets() -> Vec { + let mut s: Vec<_> = <::Assets>::asset_ids().collect(); + s.sort(); + s +} + +fn events() -> Vec> { + let result = System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let mock::RuntimeEvent::NftFractionalization(inner) = e { + Some(inner) + } else { + None + } + }) + .collect(); + + System::reset_events(); + + result +} + +type AccountIdOf = ::AccountId; + +fn account(id: u8) -> AccountIdOf { + [id; 32].into() +} + +#[test] +fn fractionalize_should_work() { + new_test_ext().execute_with(|| { + let nft_collection_id = 0; + let nft_id = 0; + let asset_id = 0; + let fractions = 1000; + + Balances::set_balance(&account(1), 100); + Balances::set_balance(&account(2), 100); + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + account(1), + CollectionConfig::default(), + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + account(1), + None, + )); + + assert_ok!(NftFractionalization::fractionalize( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + asset_id, + account(2), + fractions, + )); + assert_eq!(assets(), vec![asset_id]); + assert_eq!(Assets::balance(asset_id, account(2)), fractions); + assert_eq!(Balances::total_balance_on_hold(&account(1)), 2); + assert_eq!(String::from_utf8(Assets::name(0)).unwrap(), "Frac 0-0"); + assert_eq!(String::from_utf8(Assets::symbol(0)).unwrap(), "FRAC"); + assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(account(1))); + 
assert_noop!( + Nfts::transfer( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + account(2), + ), + DispatchError::Module(ModuleError { + index: 4, + error: [12, 0, 0, 0], + message: Some("ItemLocked") + }) + ); + + let details = NftToAsset::::get((&nft_collection_id, &nft_id)).unwrap(); + assert_eq!(details.asset, asset_id); + assert_eq!(details.fractions, fractions); + + assert!(events().contains(&Event::::NftFractionalized { + nft_collection: nft_collection_id, + nft: nft_id, + fractions, + asset: asset_id, + beneficiary: account(2), + })); + + // owner can't burn an already fractionalized NFT + assert_noop!( + Nfts::burn(RuntimeOrigin::signed(account(1)), nft_collection_id, nft_id), + DispatchError::Module(ModuleError { + index: 4, + error: [12, 0, 0, 0], + message: Some("ItemLocked") + }) + ); + + // can't fractionalize twice + assert_noop!( + NftFractionalization::fractionalize( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + asset_id + 1, + account(2), + fractions, + ), + DispatchError::Module(ModuleError { + index: 4, + error: [12, 0, 0, 0], + message: Some("ItemLocked") + }) + ); + + let nft_id = nft_id + 1; + assert_noop!( + NftFractionalization::fractionalize( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + asset_id, + account(2), + fractions, + ), + Error::::NftNotFound + ); + + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + account(2), + None + )); + assert_noop!( + NftFractionalization::fractionalize( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + asset_id, + account(2), + fractions, + ), + Error::::NoPermission + ); + }); +} + +#[test] +fn unify_should_work() { + new_test_ext().execute_with(|| { + let nft_collection_id = 0; + let nft_id = 0; + let asset_id = 0; + let fractions = 1000; + + Balances::set_balance(&account(1), 100); + Balances::set_balance(&account(2), 100); + + assert_ok!(Nfts::force_create( + 
RuntimeOrigin::root(), + account(1), + CollectionConfig::default(), + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + account(1), + None, + )); + assert_ok!(NftFractionalization::fractionalize( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + asset_id, + account(2), + fractions, + )); + + assert_noop!( + NftFractionalization::unify( + RuntimeOrigin::signed(account(2)), + nft_collection_id + 1, + nft_id, + asset_id, + account(1), + ), + Error::::NftNotFractionalized + ); + assert_noop!( + NftFractionalization::unify( + RuntimeOrigin::signed(account(2)), + nft_collection_id, + nft_id, + asset_id + 1, + account(1), + ), + Error::::IncorrectAssetId + ); + + // can't unify the asset a user doesn't hold + assert_noop!( + NftFractionalization::unify( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + asset_id, + account(1), + ), + DispatchError::Token(FundsUnavailable) + ); + + assert_ok!(NftFractionalization::unify( + RuntimeOrigin::signed(account(2)), + nft_collection_id, + nft_id, + asset_id, + account(1), + )); + + assert_eq!(Assets::balance(asset_id, account(2)), 0); + assert_eq!(Balances::reserved_balance(&account(1)), 1); + assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(account(1))); + assert!(!NftToAsset::::contains_key((&nft_collection_id, &nft_id))); + + assert!(events().contains(&Event::::NftUnified { + nft_collection: nft_collection_id, + nft: nft_id, + asset: asset_id, + beneficiary: account(1), + })); + + // validate we need to hold the full balance to un-fractionalize the NFT + let asset_id = asset_id + 1; + assert_ok!(NftFractionalization::fractionalize( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + asset_id, + account(1), + fractions, + )); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(account(1)), asset_id, account(2), 1)); + assert_eq!(Assets::balance(asset_id, account(1)), fractions - 1); + 
assert_eq!(Assets::balance(asset_id, account(2)), 1); + assert_noop!( + NftFractionalization::unify( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + asset_id, + account(1), + ), + DispatchError::Token(FundsUnavailable) + ); + + assert_ok!(Assets::transfer(RuntimeOrigin::signed(account(2)), asset_id, account(1), 1)); + assert_ok!(NftFractionalization::unify( + RuntimeOrigin::signed(account(1)), + nft_collection_id, + nft_id, + asset_id, + account(2), + )); + assert_eq!(Nfts::owner(nft_collection_id, nft_id), Some(account(2))); + }); +} diff --git a/frame/nft-fractionalization/src/types.rs b/frame/nft-fractionalization/src/types.rs new file mode 100644 index 0000000000000..cbaaf5f5160d3 --- /dev/null +++ b/frame/nft-fractionalization/src/types.rs @@ -0,0 +1,76 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Various basic types for use in the Nft fractionalization pallet. 
+ +use super::*; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::traits::{fungible::Inspect as FunInspect, fungibles::Inspect}; +use scale_info::TypeInfo; +use sp_runtime::traits::StaticLookup; + +pub type AssetIdOf = <::Assets as Inspect<::AccountId>>::AssetId; +pub type AssetBalanceOf = + <::Assets as Inspect<::AccountId>>::Balance; +pub type DepositOf = + <::Currency as FunInspect<::AccountId>>::Balance; +pub type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + +/// Stores the details of a fractionalized item. +#[derive(Decode, Encode, Default, PartialEq, Eq, MaxEncodedLen, TypeInfo)] +pub struct Details { + /// Minted asset. + pub asset: AssetId, + + /// Number of fractions minted. + pub fractions: Fractions, + + /// Reserved deposit for creating a new asset. + pub deposit: Deposit, + + /// Account that fractionalized an item. + pub asset_creator: AccountId, +} + +/// Benchmark Helper +#[cfg(feature = "runtime-benchmarks")] +pub trait BenchmarkHelper { + /// Returns an asset id from a given integer. + fn asset(id: u32) -> AssetId; + /// Returns a collection id from a given integer. + fn collection(id: u32) -> CollectionId; + /// Returns an nft id from a given integer. + fn nft(id: u32) -> ItemId; +} + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelper for () +where + AssetId: From, + CollectionId: From, + ItemId: From, +{ + fn asset(id: u32) -> AssetId { + id.into() + } + fn collection(id: u32) -> CollectionId { + id.into() + } + fn nft(id: u32) -> ItemId { + id.into() + } +} diff --git a/frame/nft-fractionalization/src/weights.rs b/frame/nft-fractionalization/src/weights.rs new file mode 100644 index 0000000000000..ebb4aa0fbcfba --- /dev/null +++ b/frame/nft-fractionalization/src/weights.rs @@ -0,0 +1,186 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_nft_fractionalization +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/production/substrate +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_nft_fractionalization +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/nft-fractionalization/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for pallet_nft_fractionalization. 
+pub trait WeightInfo { + fn fractionalize() -> Weight; + fn unify() -> Weight; +} + +/// Weights for pallet_nft_fractionalization using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: Nfts Item (r:1 w:0) + /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) + /// Storage: Nfts Collection (r:1 w:1) + /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:1 w:1) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: Assets Asset (r:1 w:1) + /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: Assets Account (r:1 w:1) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Assets Metadata (r:1 w:1) + /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: NftFractionalization NftToAsset (r:0 w:1) + /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) + fn fractionalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `609` + // Estimated: `4326` + // Minimum execution time: 187_416_000 picoseconds. 
+ Weight::from_parts(191_131_000, 4326) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(8_u64)) + } + /// Storage: NftFractionalization NftToAsset (r:1 w:1) + /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) + /// Storage: Assets Asset (r:1 w:1) + /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: Assets Account (r:1 w:1) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:1 w:1) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: Nfts Collection (r:1 w:1) + /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts CollectionConfigOf (r:1 w:0) + /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: Nfts ItemConfigOf (r:1 w:0) + /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: Nfts Item (r:1 w:1) + /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) + /// Storage: Nfts Account (r:0 w:1) + /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: Nfts ItemPriceOf (r:0 w:1) + /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: Nfts PendingSwapOf (r:0 w:1) + /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + fn unify() -> Weight { + // Proof Size summary in bytes: + // Measured: `1422` + // Estimated: 
`4326` + // Minimum execution time: 134_159_000 picoseconds. + Weight::from_parts(136_621_000, 4326) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(10_u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + /// Storage: Nfts Item (r:1 w:0) + /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) + /// Storage: Nfts Collection (r:1 w:1) + /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:1 w:1) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: Assets Asset (r:1 w:1) + /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: Assets Account (r:1 w:1) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Assets Metadata (r:1 w:1) + /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: NftFractionalization NftToAsset (r:0 w:1) + /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) + fn fractionalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `609` + // Estimated: `4326` + // Minimum execution time: 187_416_000 picoseconds. 
+ Weight::from_parts(191_131_000, 4326) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(8_u64)) + } + /// Storage: NftFractionalization NftToAsset (r:1 w:1) + /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) + /// Storage: Assets Asset (r:1 w:1) + /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: Assets Account (r:1 w:1) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:1 w:1) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: Nfts Collection (r:1 w:1) + /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts CollectionConfigOf (r:1 w:0) + /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: Nfts ItemConfigOf (r:1 w:0) + /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: Nfts Item (r:1 w:1) + /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) + /// Storage: Nfts Account (r:0 w:1) + /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: Nfts ItemPriceOf (r:0 w:1) + /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: Nfts PendingSwapOf (r:0 w:1) + /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + fn unify() -> Weight { + // Proof Size summary in bytes: + // Measured: `1422` + // Estimated: 
`4326` + // Minimum execution time: 134_159_000 picoseconds. + Weight::from_parts(136_621_000, 4326) + .saturating_add(RocksDbWeight::get().reads(9_u64)) + .saturating_add(RocksDbWeight::get().writes(10_u64)) + } +} diff --git a/frame/nfts/Cargo.toml b/frame/nfts/Cargo.toml index 73df7518e96c0..d378803f5de94 100644 --- a/frame/nfts/Cargo.toml +++ b/frame/nfts/Cargo.toml @@ -13,21 +13,21 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } enumflags2 = { version = "0.7.7" } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-keystore = { version = "0.13.0", path = "../../primitives/keystore" 
} +sp-keystore = { version = "0.27.0", path = "../../primitives/keystore" } [features] default = ["std"] @@ -42,10 +42,19 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "sp-keystore/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "pallet-balances/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/nfts/runtime-api/Cargo.toml b/frame/nfts/runtime-api/Cargo.toml index 29d79e7768515..bf5f9f0ec001f 100644 --- a/frame/nfts/runtime-api/Cargo.toml +++ b/frame/nfts/runtime-api/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } pallet-nfts = { version = "4.0.0-dev", default-features = false, path = "../../nfts" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs index 68252ebfc9cac..995c842036746 100644 --- a/frame/nfts/src/benchmarking.rs +++ b/frame/nfts/src/benchmarking.rs @@ -30,7 +30,7 @@ use frame_support::{ traits::{EnsureOrigin, Get}, BoundedVec, }; -use frame_system::RawOrigin as SystemOrigin; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin as SystemOrigin}; use sp_io::crypto::{sr25519_generate, sr25519_sign}; use sp_runtime::{ traits::{Bounded, IdentifyAccount, One}, @@ -247,7 +247,7 @@ benchmarks_instance_pallet! 
{ let call = Call::::create { admin, config: default_collection_config::() }; }: { call.dispatch_bypass_filter(origin)? } verify { - assert_last_event::(Event::Created { collection: T::Helper::collection(0), creator: caller.clone(), owner: caller }.into()); + assert_last_event::(Event::NextCollectionIdIncremented { next_id: Some(T::Helper::collection(1)) }.into()); } force_create { @@ -255,7 +255,7 @@ benchmarks_instance_pallet! { let caller_lookup = T::Lookup::unlookup(caller.clone()); }: _(SystemOrigin::Root, caller_lookup, default_collection_config::()) verify { - assert_last_event::(Event::ForceCreated { collection: T::Helper::collection(0), owner: caller }.into()); + assert_last_event::(Event::NextCollectionIdIncremented { next_id: Some(T::Helper::collection(1)) }.into()); } destroy { @@ -589,7 +589,7 @@ benchmarks_instance_pallet! { let (item, ..) = mint_item::(0); let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); - let deadline = T::BlockNumber::max_value(); + let deadline = BlockNumberFor::::max_value(); }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup, Some(deadline)) verify { assert_last_event::(Event::TransferApproved { collection, item, owner: caller, delegate, deadline: Some(deadline) }.into()); @@ -601,7 +601,7 @@ benchmarks_instance_pallet! { let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let origin = SystemOrigin::Signed(caller.clone()).into(); - let deadline = T::BlockNumber::max_value(); + let deadline = BlockNumberFor::::max_value(); Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup) verify { @@ -614,7 +614,7 @@ benchmarks_instance_pallet! 
{ let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let origin = SystemOrigin::Signed(caller.clone()).into(); - let deadline = T::BlockNumber::max_value(); + let deadline = BlockNumberFor::::max_value(); Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; }: _(SystemOrigin::Signed(caller.clone()), collection, item) verify { @@ -680,9 +680,9 @@ benchmarks_instance_pallet! { let buyer_lookup = T::Lookup::unlookup(buyer.clone()); let price = ItemPrice::::from(0u32); let origin = SystemOrigin::Signed(seller.clone()).into(); - Nfts::::set_price(origin, collection, item, Some(price.clone()), Some(buyer_lookup))?; + Nfts::::set_price(origin, collection, item, Some(price), Some(buyer_lookup))?; T::Currency::make_free_balance_be(&buyer, DepositBalanceOf::::max_value()); - }: _(SystemOrigin::Signed(buyer.clone()), collection, item, price.clone()) + }: _(SystemOrigin::Signed(buyer.clone()), collection, item, price) verify { assert_last_event::(Event::ItemBought { collection, @@ -828,6 +828,7 @@ benchmarks_instance_pallet! { metadata: metadata.clone(), only_account: None, deadline: One::one(), + mint_price: Some(DepositBalanceOf::::min_value()), }; let message = Encode::encode(&mint_data); let signature = MultiSignature::Sr25519(sr25519_sign(0.into(), &caller_public, &message).unwrap()); @@ -835,7 +836,7 @@ benchmarks_instance_pallet! 
{ let target: T::AccountId = account("target", 0, SEED); T::Currency::make_free_balance_be(&target, DepositBalanceOf::::max_value()); frame_system::Pallet::::set_block_number(One::one()); - }: _(SystemOrigin::Signed(target.clone()), mint_data, signature.into(), caller) + }: _(SystemOrigin::Signed(target.clone()), Box::new(mint_data), signature.into(), caller) verify { let metadata: BoundedVec<_, _> = metadata.try_into().unwrap(); assert_last_event::(Event::ItemMetadataSet { collection, item, data: metadata }.into()); diff --git a/frame/nfts/src/common_functions.rs b/frame/nfts/src/common_functions.rs index a3486edec23cb..1ad523d664c7c 100644 --- a/frame/nfts/src/common_functions.rs +++ b/frame/nfts/src/common_functions.rs @@ -31,7 +31,12 @@ impl, I: 'static> Pallet { Collection::::get(collection).map(|i| i.owner) } - /// Validate the `data` was signed by `signer` and the `signature` is correct. + /// Validates the signature of the given data with the provided signer's account ID. + /// + /// # Errors + /// + /// This function returns a [`WrongSignature`](crate::Error::WrongSignature) error if the + /// signature is invalid or the verification process fails. 
pub fn validate_signature( data: &Vec, signature: &T::OffchainSignature, @@ -55,6 +60,12 @@ impl, I: 'static> Pallet { Ok(()) } + pub(crate) fn set_next_collection_id(collection: T::CollectionId) { + let next_id = collection.increment(); + NextCollectionId::::set(next_id); + Self::deposit_event(Event::NextCollectionIdIncremented { next_id }); + } + #[cfg(any(test, feature = "runtime-benchmarks"))] pub fn set_next_id(id: T::CollectionId) { NextCollectionId::::set(Some(id)); @@ -62,6 +73,8 @@ impl, I: 'static> Pallet { #[cfg(test)] pub fn get_next_id() -> T::CollectionId { - NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()) + NextCollectionId::::get() + .or(T::CollectionId::initial_value()) + .expect("Failed to get next collection ID") } } diff --git a/frame/nfts/src/features/approvals.rs b/frame/nfts/src/features/approvals.rs index 634436a8562d8..053fa67163b99 100644 --- a/frame/nfts/src/features/approvals.rs +++ b/frame/nfts/src/features/approvals.rs @@ -15,16 +15,38 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! This module contains helper functions for the approval logic implemented in the NFTs pallet. +//! The bitflag [`PalletFeature::Approvals`] needs to be set in [`Config::Features`] for NFTs +//! to have the functionality defined in this module. + use crate::*; use frame_support::pallet_prelude::*; impl, I: 'static> Pallet { + /// Approves the transfer of an item to a delegate. + /// + /// This function is used to approve the transfer of the specified `item` in the `collection` to + /// a `delegate`. If `maybe_check_origin` is specified, the function ensures that the + /// `check_origin` account is the owner of the item, granting them permission to approve the + /// transfer. The `delegate` is the account that will be allowed to take control of the item. + /// Optionally, a `deadline` can be specified to set a time limit for the approval. 
The + /// `deadline` is expressed in block numbers and is added to the current block number to + /// determine the absolute deadline for the approval. After approving the transfer, the function + /// emits the `TransferApproved` event. + /// + /// - `maybe_check_origin`: The optional account that is required to be the owner of the item, + /// granting permission to approve the transfer. If `None`, no permission check is performed. + /// - `collection`: The identifier of the collection containing the item to be transferred. + /// - `item`: The identifier of the item to be transferred. + /// - `delegate`: The account that will be allowed to take control of the item. + /// - `maybe_deadline`: The optional deadline (in block numbers) specifying the time limit for + /// the approval. pub(crate) fn do_approve_transfer( maybe_check_origin: Option, collection: T::CollectionId, item: T::ItemId, delegate: T::AccountId, - maybe_deadline: Option<::BlockNumber>, + maybe_deadline: Option>, ) -> DispatchResult { ensure!( Self::is_pallet_feature_enabled(PalletFeature::Approvals), @@ -63,6 +85,20 @@ impl, I: 'static> Pallet { Ok(()) } + /// Cancels the approval for the transfer of an item to a delegate. + /// + /// This function is used to cancel the approval for the transfer of the specified `item` in the + /// `collection` to a `delegate`. If `maybe_check_origin` is specified, the function ensures + /// that the `check_origin` account is the owner of the item or that the approval is past its + /// deadline, granting permission to cancel the approval. After canceling the approval, the + /// function emits the `ApprovalCancelled` event. + /// + /// - `maybe_check_origin`: The optional account that is required to be the owner of the item or + /// that the approval is past its deadline, granting permission to cancel the approval. If + /// `None`, no permission check is performed. + /// - `collection`: The identifier of the collection containing the item. 
+ /// - `item`: The identifier of the item. + /// - `delegate`: The account that was previously allowed to take control of the item. pub(crate) fn do_cancel_approval( maybe_check_origin: Option, collection: T::CollectionId, @@ -100,6 +136,19 @@ impl, I: 'static> Pallet { Ok(()) } + /// Clears all transfer approvals for an item. + /// + /// This function is used to clear all transfer approvals for the specified `item` in the + /// `collection`. If `maybe_check_origin` is specified, the function ensures that the + /// `check_origin` account is the owner of the item, granting permission to clear all transfer + /// approvals. After clearing all approvals, the function emits the `AllApprovalsCancelled` + /// event. + /// + /// - `maybe_check_origin`: The optional account that is required to be the owner of the item, + /// granting permission to clear all transfer approvals. If `None`, no permission check is + /// performed. + /// - `collection`: The collection ID containing the item. + /// - `item`: The item ID for which transfer approvals will be cleared. pub(crate) fn do_clear_all_transfer_approvals( maybe_check_origin: Option, collection: T::CollectionId, diff --git a/frame/nfts/src/features/atomic_swap.rs b/frame/nfts/src/features/atomic_swap.rs index 505056be95353..830283b73c2aa 100644 --- a/frame/nfts/src/features/atomic_swap.rs +++ b/frame/nfts/src/features/atomic_swap.rs @@ -15,6 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! This module contains helper functions for performing atomic swaps implemented in the NFTs +//! pallet. +//! The bitflag [`PalletFeature::Swaps`] needs to be set in [`Config::Features`] for NFTs +//! to have the functionality defined in this module. + use crate::*; use frame_support::{ pallet_prelude::*, @@ -22,6 +27,25 @@ use frame_support::{ }; impl, I: 'static> Pallet { + /// Creates a new swap offer for the specified item. 
+ /// + /// This function is used to create a new swap offer for the specified item. The `caller` + /// account must be the owner of the item. The swap offer specifies the `offered_collection`, + /// `offered_item`, `desired_collection`, `maybe_desired_item`, `maybe_price`, and `duration`. + /// The `duration` specifies the deadline by which the swap must be claimed. If + /// `maybe_desired_item` is `Some`, the specified item is expected in return for the swap. If + /// `maybe_desired_item` is `None`, it indicates that any item from the `desired_collection` can + /// be offered in return. The `maybe_price` specifies an optional price for the swap. If + /// specified, the other party must offer the specified `price` or higher for the swap. After + /// creating the swap, the function emits the `SwapCreated` event. + /// + /// - `caller`: The account creating the swap offer, which must be the owner of the item. + /// - `offered_collection_id`: The collection ID containing the offered item. + /// - `offered_item_id`: The item ID offered for the swap. + /// - `desired_collection_id`: The collection ID containing the desired item (if any). + /// - `maybe_desired_item_id`: The ID of the desired item (if any). + /// - `maybe_price`: The optional price for the swap. + /// - `duration`: The duration (in block numbers) specifying the deadline for the swap claim. pub(crate) fn do_create_swap( caller: T::AccountId, offered_collection_id: T::CollectionId, @@ -29,7 +53,7 @@ impl, I: 'static> Pallet { desired_collection_id: T::CollectionId, maybe_desired_item_id: Option, maybe_price: Option>>, - duration: ::BlockNumber, + duration: frame_system::pallet_prelude::BlockNumberFor, ) -> DispatchResult { ensure!( Self::is_pallet_feature_enabled(PalletFeature::Swaps), @@ -77,7 +101,16 @@ impl, I: 'static> Pallet { Ok(()) } - + /// Cancels the specified swap offer. + /// + /// This function is used to cancel the specified swap offer created by the `caller` account. 
If + /// the swap offer's deadline has not yet passed, the `caller` must be the owner of the offered + /// item; otherwise, anyone can cancel an expired offer. + /// After canceling the swap offer, the function emits the `SwapCancelled` event. + /// + /// - `caller`: The account canceling the swap offer. + /// - `offered_collection_id`: The collection ID containing the offered item. + /// - `offered_item_id`: The item ID offered for the swap. pub(crate) fn do_cancel_swap( caller: T::AccountId, offered_collection_id: T::CollectionId, @@ -107,6 +140,23 @@ impl, I: 'static> Pallet { Ok(()) } + /// Claims the specified swap offer. + /// + /// This function is used to claim a swap offer specified by the `send_collection_id`, + /// `send_item_id`, `receive_collection_id`, and `receive_item_id`. The `caller` account must be + /// the owner of the item specified by `send_collection_id` and `send_item_id`. If the claimed + /// swap has an associated `price`, it will be transferred between the owners of the two items + /// based on the `price.direction`. After the swap is completed, the function emits the + /// `SwapClaimed` event. + /// + /// - `caller`: The account claiming the swap offer, which must be the owner of the sent item. + /// - `send_collection_id`: The identifier of the collection containing the item being sent. + /// - `send_item_id`: The identifier of the item being sent for the swap. + /// - `receive_collection_id`: The identifier of the collection containing the item being + /// received. + /// - `receive_item_id`: The identifier of the item being received in the swap. + /// - `witness_price`: The optional witness price for the swap (price that was offered in the + /// swap). 
pub(crate) fn do_claim_swap( caller: T::AccountId, send_collection_id: T::CollectionId, diff --git a/frame/nfts/src/features/attributes.rs b/frame/nfts/src/features/attributes.rs index 9098679fa9145..28f7bd2c58ce7 100644 --- a/frame/nfts/src/features/attributes.rs +++ b/frame/nfts/src/features/attributes.rs @@ -15,10 +15,38 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! This module contains helper methods to configure attributes for items and collections in the +//! NFTs pallet. +//! The bitflag [`PalletFeature::Attributes`] needs to be set in [`Config::Features`] for NFTs +//! to have the functionality defined in this module. + use crate::*; use frame_support::pallet_prelude::*; impl, I: 'static> Pallet { + /// Sets the attribute of an item or a collection. + /// + /// This function is used to set an attribute for an item or a collection. It checks the + /// provided `namespace` and verifies the permission of the caller to perform the action. The + /// `collection` and `maybe_item` parameters specify the target for the attribute. + /// + /// - `origin`: The account attempting to set the attribute. + /// - `collection`: The identifier of the collection to which the item belongs, or the + /// collection itself if setting a collection attribute. + /// - `maybe_item`: The identifier of the item to which the attribute belongs, or `None` if + /// setting a collection attribute. + /// - `namespace`: The namespace in which the attribute is being set. It can be either + /// `CollectionOwner`, `ItemOwner`, or `Account` (pre-approved external address). + /// - `key`: The key of the attribute. It should be a vector of bytes within the limits defined + /// by `T::KeyLimit`. + /// - `value`: The value of the attribute. It should be a vector of bytes within the limits + /// defined by `T::ValueLimit`. + /// - `depositor`: The account that is paying the deposit for the attribute. 
+ /// + /// Note: For the `CollectionOwner` namespace, the collection/item must have the + /// `UnlockedAttributes` setting enabled. + /// The deposit for setting an attribute is based on the `T::DepositPerByte` and + /// `T::AttributeDepositBase` configuration. pub(crate) fn do_set_attribute( origin: T::AccountId, collection: T::CollectionId, @@ -128,6 +156,23 @@ impl, I: 'static> Pallet { Ok(()) } + /// Sets the attribute of an item or a collection without performing deposit checks. + /// + /// This function is used to force-set an attribute for an item or a collection without + /// performing the deposit checks. It bypasses the deposit requirement and should only be used + /// in specific situations where deposit checks are not necessary or handled separately. + /// + /// - `set_as`: The account that would normally pay for the deposit. + /// - `collection`: The identifier of the collection to which the item belongs, or the + /// collection itself if setting a collection attribute. + /// - `maybe_item`: The identifier of the item to which the attribute belongs, or `None` if + /// setting a collection attribute. + /// - `namespace`: The namespace in which the attribute is being set. It can be either + /// `CollectionOwner`, `ItemOwner`, or `Account` (pre-approved external address). + /// - `key`: The key of the attribute. It should be a vector of bytes within the limits defined + /// by `T::KeyLimit`. + /// - `value`: The value of the attribute. It should be a vector of bytes within the limits + /// defined by `T::ValueLimit`. pub(crate) fn do_force_set_attribute( set_as: Option, collection: T::CollectionId, @@ -159,6 +204,15 @@ impl, I: 'static> Pallet { Ok(()) } + /// Sets multiple attributes for an item or a collection. + /// + /// This function checks the pre-signed data is valid and updates the attributes of an item or + /// collection. It is limited by [`Config::MaxAttributesPerCall`] to prevent excessive storage + /// consumption in a single transaction. 
+ /// + /// - `origin`: The account initiating the transaction. + /// - `data`: The data containing the details of the pre-signed attributes to be set. + /// - `signer`: The account of the pre-signed attributes signer. pub(crate) fn do_set_attributes_pre_signed( origin: T::AccountId, data: PreSignedAttributesOf, @@ -212,6 +266,22 @@ impl, I: 'static> Pallet { Ok(()) } + /// Clears an attribute of an item or a collection. + /// + /// This function allows clearing an attribute from an item or a collection. It verifies the + /// permission of the caller to perform the action based on the provided `namespace` and + /// `depositor` account. The deposit associated with the attribute, if any, will be unreserved. + /// + /// - `maybe_check_origin`: An optional account that acts as an additional security check when + /// clearing the attribute. This can be `None` if no additional check is required. + /// - `collection`: The identifier of the collection to which the item belongs, or the + /// collection itself if clearing a collection attribute. + /// - `maybe_item`: The identifier of the item to which the attribute belongs, or `None` if + /// clearing a collection attribute. + /// - `namespace`: The namespace in which the attribute is being cleared. It can be either + /// `CollectionOwner`, `ItemOwner`, or `Account`. + /// - `key`: The key of the attribute to be cleared. It should be a vector of bytes within the + /// limits defined by `T::KeyLimit`. pub(crate) fn do_clear_attribute( maybe_check_origin: Option, collection: T::CollectionId, @@ -288,6 +358,17 @@ impl, I: 'static> Pallet { Ok(()) } + /// Approves a delegate to set attributes on behalf of the item's owner. + /// + /// This function allows the owner of an item to approve a delegate to set attributes in the + /// `Account(delegate)` namespace. The maximum number of approvals is determined by + /// the configuration `T::MaxAttributesApprovals`. 
+ /// + /// - `check_origin`: The account of the item's owner attempting to approve the delegate. + /// - `collection`: The identifier of the collection to which the item belongs. + /// - `item`: The identifier of the item for which the delegate is being approved. + /// - `delegate`: The account that is being approved to set attributes on behalf of the item's + /// owner. pub(crate) fn do_approve_item_attributes( check_origin: T::AccountId, collection: T::CollectionId, @@ -312,6 +393,22 @@ impl, I: 'static> Pallet { }) } + /// Cancels the approval of an item's attributes by a delegate. + /// + /// This function allows the owner of an item to cancel the approval of a delegate to set + /// attributes in the `Account(delegate)` namespace. The delegate's approval is removed, in + /// addition to attributes the `delegate` previously created, and any unreserved deposit + /// is returned. The number of attributes that the delegate has set for the item must + /// not exceed the `account_attributes` provided in the `witness`. + /// This function is used to prevent unintended or malicious cancellations. + /// + /// - `check_origin`: The account of the item's owner attempting to cancel the delegate's + /// approval. + /// - `collection`: The identifier of the collection to which the item belongs. + /// - `item`: The identifier of the item for which the delegate's approval is being canceled. + /// - `delegate`: The account whose approval is being canceled. + /// - `witness`: The witness containing the number of attributes set by the delegate for the + /// item. pub(crate) fn do_cancel_item_attributes_approval( check_origin: T::AccountId, collection: T::CollectionId, @@ -355,6 +452,7 @@ impl, I: 'static> Pallet { }) } + /// A helper method to check whether an attribute namespace is valid. fn is_valid_namespace( origin: &T::AccountId, namespace: &AttributeNamespace, @@ -381,17 +479,47 @@ impl, I: 'static> Pallet { Ok(result) } - /// A helper method to construct attribute's key. 
+ /// A helper method to construct an attribute's key. + /// + /// # Errors + /// + /// This function returns an [`IncorrectData`](crate::Error::IncorrectData) error if the + /// provided attribute `key` is too long. pub fn construct_attribute_key( key: Vec, ) -> Result, DispatchError> { Ok(BoundedVec::try_from(key).map_err(|_| Error::::IncorrectData)?) } - /// A helper method to construct attribute's value. + /// A helper method to construct an attribute's value. + /// + /// # Errors + /// + /// This function returns an [`IncorrectData`](crate::Error::IncorrectData) error if the + /// provided `value` is too long. pub fn construct_attribute_value( value: Vec, ) -> Result, DispatchError> { Ok(BoundedVec::try_from(value).map_err(|_| Error::::IncorrectData)?) } + + /// A helper method to check whether a system attribute is set for a given item. + /// + /// # Errors + /// + /// This function returns an [`IncorrectData`](crate::Error::IncorrectData) error if the + /// provided pallet attribute is too long. + pub fn has_system_attribute( + collection: &T::CollectionId, + item: &T::ItemId, + attribute_key: PalletAttributes, + ) -> Result { + let attribute = ( + &collection, + Some(item), + AttributeNamespace::Pallet, + &Self::construct_attribute_key(attribute_key.encode())?, + ); + Ok(Attribute::::contains_key(attribute)) + } } diff --git a/frame/nfts/src/features/buy_sell.rs b/frame/nfts/src/features/buy_sell.rs index ad721e0748ad0..d6ec6f50d2724 100644 --- a/frame/nfts/src/features/buy_sell.rs +++ b/frame/nfts/src/features/buy_sell.rs @@ -15,6 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! This module contains helper functions to perform the buy and sell functionalities of the NFTs +//! pallet. +//! The bitflag [`PalletFeature::Trading`] needs to be set in the [`Config::Features`] for NFTs +//! to have the functionality defined in this module. 
+ use crate::*; use frame_support::{ pallet_prelude::*, @@ -22,6 +27,16 @@ use frame_support::{ }; impl, I: 'static> Pallet { + /// Pays the specified tips to the corresponding receivers. + /// + /// This function is used to pay tips from the `sender` account to multiple receivers. The tips + /// are specified as a `BoundedVec` of `ItemTipOf` with a maximum length of `T::MaxTips`. For + /// each tip, the function transfers the `amount` to the `receiver` account. The sender is + /// responsible for ensuring the validity of the provided tips. + /// + /// - `sender`: The account that pays the tips. + /// - `tips`: A `BoundedVec` containing the tips to be paid, where each tip contains the + /// `collection`, `item`, `receiver`, and `amount`. pub(crate) fn do_pay_tips( sender: T::AccountId, tips: BoundedVec, T::MaxTips>, @@ -40,6 +55,20 @@ impl, I: 'static> Pallet { Ok(()) } + /// Sets the price and whitelists a buyer for an item in the specified collection. + /// + /// This function is used to set the price and whitelist a buyer for an item in the + /// specified `collection`. The `sender` account must be the owner of the item. The item's price + /// and the whitelisted buyer can be set to allow trading the item. If `price` is `None`, the + /// item will be marked as not for sale. + /// + /// - `collection`: The identifier of the collection containing the item. + /// - `item`: The identifier of the item for which the price and whitelist information will be + /// set. + /// - `sender`: The account that sets the price and whitelist information for the item. + /// - `price`: The optional price for the item. + /// - `whitelisted_buyer`: The optional account that is whitelisted to buy the item at the set + /// price. pub(crate) fn do_set_price( collection: T::CollectionId, item: T::ItemId, @@ -83,6 +112,19 @@ impl, I: 'static> Pallet { Ok(()) } + /// Buys the specified item from the collection. 
+ /// + /// This function is used to buy an item from the specified `collection`. The `buyer` account + /// will attempt to buy the item with the provided `bid_price`. The item's current owner will + /// receive the bid price if it is equal to or higher than the item's set price. If + /// `whitelisted_buyer` is specified in the item's price information, only that account is + /// allowed to buy the item. If the item is not for sale, or the bid price is too low, the + /// function will return an error. + /// + /// - `collection`: The identifier of the collection containing the item to be bought. + /// - `item`: The identifier of the item to be bought. + /// - `buyer`: The account that attempts to buy the item. + /// - `bid_price`: The bid price offered by the buyer for the item. pub(crate) fn do_buy_item( collection: T::CollectionId, item: T::ItemId, diff --git a/frame/nfts/src/features/create_delete_collection.rs b/frame/nfts/src/features/create_delete_collection.rs index e9434760628ec..e343ad18e504f 100644 --- a/frame/nfts/src/features/create_delete_collection.rs +++ b/frame/nfts/src/features/create_delete_collection.rs @@ -15,10 +15,24 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! This module contains helper methods to perform functionality associated with creating and +//! destroying collections for the NFTs pallet. + use crate::*; use frame_support::pallet_prelude::*; impl, I: 'static> Pallet { + /// Create a new collection with the given `collection`, `owner`, `admin`, `config`, `deposit`, + /// and `event`. + /// + /// This function creates a new collection with the provided parameters. It reserves the + /// required deposit from the owner's account, sets the collection details, assigns admin roles, + /// and inserts the provided configuration. Finally, it emits the specified event upon success. 
+ /// + /// # Errors + /// + /// This function returns a [`CollectionIdInUse`](crate::Error::CollectionIdInUse) error if the + /// collection ID is already in use. pub fn do_create_collection( collection: T::CollectionId, owner: T::AccountId, @@ -50,17 +64,33 @@ impl, I: 'static> Pallet { ), ); - let next_id = collection.increment(); - CollectionConfigOf::::insert(&collection, config); CollectionAccount::::insert(&owner, &collection, ()); - NextCollectionId::::set(Some(next_id)); - - Self::deposit_event(Event::NextCollectionIdIncremented { next_id }); Self::deposit_event(event); Ok(()) } + /// Destroy the specified collection with the given `collection`, `witness`, and + /// `maybe_check_owner`. + /// + /// This function destroys the specified collection if it exists and meets the necessary + /// conditions. It checks the provided `witness` against the actual collection details and + /// removes the collection along with its associated metadata, attributes, and configurations. + /// The necessary deposits are returned to the corresponding accounts, and the roles and + /// configurations for the collection are cleared. Finally, it emits the `Destroyed` event upon + /// successful destruction. + /// + /// # Errors + /// + /// This function returns a dispatch error in the following cases: + /// - If the collection ID is not found + /// ([`UnknownCollection`](crate::Error::UnknownCollection)). + /// - If the provided `maybe_check_owner` does not match the actual owner + /// ([`NoPermission`](crate::Error::NoPermission)). + /// - If the collection is not empty (contains items) + /// ([`CollectionNotEmpty`](crate::Error::CollectionNotEmpty)). + /// - If the `witness` does not match the actual collection details + /// ([`BadWitness`](crate::Error::BadWitness)). 
pub fn do_destroy_collection( collection: T::CollectionId, witness: DestroyWitness, diff --git a/frame/nfts/src/features/create_delete_item.rs b/frame/nfts/src/features/create_delete_item.rs index 2aa27dc066619..37f64ae1b1b99 100644 --- a/frame/nfts/src/features/create_delete_item.rs +++ b/frame/nfts/src/features/create_delete_item.rs @@ -15,10 +15,32 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! This module contains helper methods to perform functionality associated with minting and burning +//! items for the NFTs pallet. + use crate::*; -use frame_support::pallet_prelude::*; +use frame_support::{pallet_prelude::*, traits::ExistenceRequirement}; impl, I: 'static> Pallet { + /// Mint a new unique item with the given `collection`, `item`, and other minting configuration + /// details. + /// + /// This function performs the minting of a new unique item. It checks if the item does not + /// already exist in the given collection, and if the max supply limit (if configured) is not + /// reached. It also reserves the required deposit for the item and sets the item details + /// accordingly. + /// + /// # Errors + /// + /// This function returns a dispatch error in the following cases: + /// - If the collection ID is invalid ([`UnknownCollection`](crate::Error::UnknownCollection)). + /// - If the item already exists in the collection + /// ([`AlreadyExists`](crate::Error::AlreadyExists)). + /// - If the item configuration already exists + /// ([`InconsistentItemConfig`](crate::Error::InconsistentItemConfig)). + /// - If the max supply limit (if configured) for the collection is reached + /// ([`MaxSupplyReached`](crate::Error::MaxSupplyReached)). + /// - If any error occurs in the `with_details_and_config` closure. pub fn do_mint( collection: T::CollectionId, item: T::ItemId, @@ -86,13 +108,33 @@ impl, I: 'static> Pallet { Ok(()) } + /// Mints a new item using a pre-signed message. 
+ /// + /// This function allows minting a new item using a pre-signed message. The minting process is + /// similar to the regular minting process, but it is performed by a pre-authorized account. The + /// `mint_to` account receives the newly minted item. The minting process is configurable + /// through the provided `mint_data`. The attributes, metadata, and price of the item are set + /// according to the provided `mint_data`. The `with_details_and_config` closure is called to + /// validate the provided `collection_details` and `collection_config` before minting the item. + /// + /// - `mint_to`: The account that receives the newly minted item. + /// - `mint_data`: The pre-signed minting data containing the `collection`, `item`, + /// `attributes`, `metadata`, `deadline`, `only_account`, and `mint_price`. + /// - `signer`: The account that is authorized to mint the item using the pre-signed message. pub(crate) fn do_mint_pre_signed( mint_to: T::AccountId, mint_data: PreSignedMintOf, signer: T::AccountId, ) -> DispatchResult { - let PreSignedMint { collection, item, attributes, metadata, deadline, only_account } = - mint_data; + let PreSignedMint { + collection, + item, + attributes, + metadata, + deadline, + only_account, + mint_price, + } = mint_data; let metadata = Self::construct_metadata(metadata)?; ensure!( @@ -118,7 +160,17 @@ impl, I: 'static> Pallet { Some(mint_to.clone()), mint_to.clone(), item_config, - |_, _| Ok(()), + |collection_details, _| { + if let Some(price) = mint_price { + T::Currency::transfer( + &mint_to, + &collection_details.owner, + price, + ExistenceRequirement::KeepAlive, + )?; + } + Ok(()) + }, )?; let admin_account = Self::find_account_by_role(&collection, CollectionRole::Admin); if let Some(admin_account) = admin_account { @@ -146,12 +198,23 @@ impl, I: 'static> Pallet { Ok(()) } + /// Burns the specified item with the given `collection`, `item`, and `with_details`. 
+ /// + /// # Errors + /// + /// This function returns a dispatch error in the following cases: + /// - If the collection ID is invalid ([`UnknownCollection`](crate::Error::UnknownCollection)). + /// - If the item is locked ([`ItemLocked`](crate::Error::ItemLocked)). pub fn do_burn( collection: T::CollectionId, item: T::ItemId, with_details: impl FnOnce(&ItemDetailsFor) -> DispatchResult, ) -> DispatchResult { ensure!(!T::Locker::is_locked(collection, item), Error::::ItemLocked); + ensure!( + !Self::has_system_attribute(&collection, &item, PalletAttributes::TransferDisabled)?, + Error::::ItemLocked + ); let item_config = Self::get_item_config(&collection, &item)?; // NOTE: if item's settings are not empty (e.g. item's metadata is locked) // then we keep the config record and don't remove it diff --git a/frame/nfts/src/features/lock.rs b/frame/nfts/src/features/lock.rs index 8b4914baeb450..1c3c9c8672fbd 100644 --- a/frame/nfts/src/features/lock.rs +++ b/frame/nfts/src/features/lock.rs @@ -15,10 +15,25 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! This module contains helper methods to configure locks on collections and items for the NFTs +//! pallet. + use crate::*; use frame_support::pallet_prelude::*; impl, I: 'static> Pallet { + /// Locks a collection with specified settings. + /// + /// The origin must be the owner of the collection to lock it. This function disables certain + /// settings on the collection. The only setting that can't be disabled is `DepositRequired`. + /// + /// Note: it's possible only to lock the setting, but not to unlock it after. + + /// + /// - `origin`: The origin of the transaction, representing the account attempting to lock the + /// collection. + /// - `collection`: The identifier of the collection to be locked. + /// - `lock_settings`: The collection settings to be locked. 
pub(crate) fn do_lock_collection( origin: T::AccountId, collection: T::CollectionId, @@ -41,6 +56,16 @@ impl, I: 'static> Pallet { }) } + /// Locks the transfer of an item within a collection. + /// + /// The origin must have the `Freezer` role within the collection to lock the transfer of the + /// item. This function disables the `Transferable` setting on the item, preventing it from + /// being transferred to other accounts. + /// + /// - `origin`: The origin of the transaction, representing the account attempting to lock the + /// item transfer. + /// - `collection`: The identifier of the collection to which the item belongs. + /// - `item`: The identifier of the item to be locked for transfer. pub(crate) fn do_lock_item_transfer( origin: T::AccountId, collection: T::CollectionId, @@ -61,6 +86,16 @@ impl, I: 'static> Pallet { Ok(()) } + /// Unlocks the transfer of an item within a collection. + /// + /// The origin must have the `Freezer` role within the collection to unlock the transfer of the + /// item. This function enables the `Transferable` setting on the item, allowing it to be + /// transferred to other accounts. + /// + /// - `origin`: The origin of the transaction, representing the account attempting to unlock the + /// item transfer. + /// - `collection`: The identifier of the collection to which the item belongs. + /// - `item`: The identifier of the item to be unlocked for transfer. pub(crate) fn do_unlock_item_transfer( origin: T::AccountId, collection: T::CollectionId, @@ -81,6 +116,21 @@ impl, I: 'static> Pallet { Ok(()) } + /// Locks the metadata and attributes of an item within a collection. + /// + /// The origin must have the `Admin` role within the collection to lock the metadata and + /// attributes of the item. This function disables the `UnlockedMetadata` and + /// `UnlockedAttributes` settings on the item, preventing modifications to its metadata and + /// attributes. 
+ /// + /// - `maybe_check_origin`: An optional origin representing the account attempting to lock the + /// item properties. If provided, this account must have the `Admin` role within the + /// collection. If `None`, no permission check is performed, and the function can be called + /// from any origin. + /// - `collection`: The identifier of the collection to which the item belongs. + /// - `item`: The identifier of the item to be locked for properties. + /// - `lock_metadata`: A boolean indicating whether to lock the metadata of the item. + /// - `lock_attributes`: A boolean indicating whether to lock the attributes of the item. pub(crate) fn do_lock_item_properties( maybe_check_origin: Option, collection: T::CollectionId, diff --git a/frame/nfts/src/features/metadata.rs b/frame/nfts/src/features/metadata.rs index fde0296784d11..e177f39bb8b81 100644 --- a/frame/nfts/src/features/metadata.rs +++ b/frame/nfts/src/features/metadata.rs @@ -15,11 +15,29 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! This module contains helper methods to configure the metadata of collections and items. + use crate::*; use frame_support::pallet_prelude::*; impl, I: 'static> Pallet { - /// Note: if `maybe_depositor` is None, that means the depositor will be a collection's owner + /// Sets the metadata for a specific item within a collection. + /// + /// - `maybe_check_origin`: An optional account ID that is allowed to set the metadata. If + /// `None`, it's considered the root account. + /// - `collection`: The ID of the collection to which the item belongs. + /// - `item`: The ID of the item to set the metadata for. + /// - `data`: The metadata to set for the item. + /// - `maybe_depositor`: An optional account ID that will provide the deposit for the metadata. + /// If `None`, the collection's owner provides the deposit. + /// + /// Emits `ItemMetadataSet` event upon successful setting of the metadata. 
+ /// Returns `Ok(())` on success, or one of the following dispatch errors: + /// - `UnknownCollection`: The specified collection does not exist. + /// - `UnknownItem`: The specified item does not exist within the collection. + /// - `LockedItemMetadata`: The metadata for the item is locked and cannot be modified. + /// - `NoPermission`: The caller does not have the required permission to set the metadata. + /// - `DepositExceeded`: The deposit amount exceeds the maximum allowed value. pub(crate) fn do_set_item_metadata( maybe_check_origin: Option, collection: T::CollectionId, @@ -91,6 +109,19 @@ impl, I: 'static> Pallet { }) } + /// Clears the metadata for a specific item within a collection. + /// + /// - `maybe_check_origin`: An optional account ID that is allowed to clear the metadata. If + /// `None`, it's considered the root account. + /// - `collection`: The ID of the collection to which the item belongs. + /// - `item`: The ID of the item for which to clear the metadata. + /// + /// Emits `ItemMetadataCleared` event upon successful clearing of the metadata. + /// Returns `Ok(())` on success, or one of the following dispatch errors: + /// - `UnknownCollection`: The specified collection does not exist. + /// - `MetadataNotFound`: The metadata for the specified item was not found. + /// - `LockedItemMetadata`: The metadata for the item is locked and cannot be modified. + /// - `NoPermission`: The caller does not have the required permission to clear the metadata. pub(crate) fn do_clear_item_metadata( maybe_check_origin: Option, collection: T::CollectionId, @@ -131,6 +162,19 @@ impl, I: 'static> Pallet { Ok(()) } + /// Sets the metadata for a specific collection. + /// + /// - `maybe_check_origin`: An optional account ID that is allowed to set the collection + /// metadata. If `None`, it's considered the root account. + /// - `collection`: The ID of the collection for which to set the metadata. + /// - `data`: The metadata to set for the collection. 
+ /// + /// Emits `CollectionMetadataSet` event upon successful setting of the metadata. + /// Returns `Ok(())` on success, or one of the following dispatch errors: + /// - `UnknownCollection`: The specified collection does not exist. + /// - `LockedCollectionMetadata`: The metadata for the collection is locked and cannot be + /// modified. + /// - `NoPermission`: The caller does not have the required permission to set the metadata. pub(crate) fn do_set_collection_metadata( maybe_check_origin: Option, collection: T::CollectionId, @@ -179,6 +223,19 @@ impl, I: 'static> Pallet { }) } + /// Clears the metadata for a specific collection. + /// + /// - `maybe_check_origin`: An optional account ID that is allowed to clear the collection + /// metadata. If `None`, it's considered the root account. + /// - `collection`: The ID of the collection for which to clear the metadata. + /// + /// Emits `CollectionMetadataCleared` event upon successful clearing of the metadata. + /// Returns `Ok(())` on success, or one of the following dispatch errors: + /// - `UnknownCollection`: The specified collection does not exist. + /// - `MetadataNotFound`: The metadata for the collection was not found. + /// - `LockedCollectionMetadata`: The metadata for the collection is locked and cannot be + /// modified. + /// - `NoPermission`: The caller does not have the required permission to clear the metadata. pub(crate) fn do_clear_collection_metadata( maybe_check_origin: Option, collection: T::CollectionId, @@ -209,6 +266,11 @@ impl, I: 'static> Pallet { } /// A helper method to construct metadata. + /// + /// # Errors + /// + /// This function returns an [`IncorrectMetadata`](crate::Error::IncorrectMetadata) dispatch + /// error if the provided metadata is too long. 
pub fn construct_metadata( metadata: Vec, ) -> Result, DispatchError> { diff --git a/frame/nfts/src/features/roles.rs b/frame/nfts/src/features/roles.rs index 3bac002069cf3..f6d2785fd9cb4 100644 --- a/frame/nfts/src/features/roles.rs +++ b/frame/nfts/src/features/roles.rs @@ -15,11 +15,26 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! This module contains helper methods to configure account roles for existing collections. + use crate::*; use frame_support::pallet_prelude::*; use sp_std::collections::btree_map::BTreeMap; impl, I: 'static> Pallet { + /// Set the team roles for a specific collection. + /// + /// - `maybe_check_owner`: An optional account ID used to check ownership permission. If `None`, + /// it is considered as the root. + /// - `collection`: The ID of the collection for which to set the team roles. + /// - `issuer`: An optional account ID representing the issuer role. + /// - `admin`: An optional account ID representing the admin role. + /// - `freezer`: An optional account ID representing the freezer role. + /// + /// This function allows the owner or the root (when `maybe_check_owner` is `None`) to set the + /// team roles for a specific collection. The root can change the role from `None` to + /// `Some(account)`, but other roles can only be updated by the root or an account with an + /// existing role in the collection. pub(crate) fn do_set_team( maybe_check_owner: Option, collection: T::CollectionId, @@ -59,10 +74,10 @@ impl, I: 'static> Pallet { let account_to_role = Self::group_roles_by_account(roles); - // delete the previous records + // Delete the previous records. Self::clear_roles(&collection)?; - // insert new records + // Insert new records. for (account, roles) in account_to_role { CollectionRoleOf::::insert(&collection, &account, roles); } @@ -76,8 +91,9 @@ impl, I: 'static> Pallet { /// /// - `collection_id`: A collection to clear the roles in. 
/// - /// Throws an error if some of the roles were left in storage. - /// This means the `CollectionRoles::max_roles()` needs to be adjusted. + /// This function clears all the roles associated with the given `collection_id`. It throws an + /// error if some of the roles were left in storage, indicating that the maximum number of roles + /// may need to be adjusted. pub(crate) fn clear_roles(collection_id: &T::CollectionId) -> Result<(), DispatchError> { let res = CollectionRoleOf::::clear_prefix( &collection_id, @@ -94,7 +110,7 @@ impl, I: 'static> Pallet { /// - `account_id`: An account to check the role for. /// - `role`: A role to validate. /// - /// Returns boolean. + /// Returns `true` if the account has the specified role, `false` otherwise. pub(crate) fn has_role( collection_id: &T::CollectionId, account_id: &T::AccountId, @@ -123,7 +139,7 @@ impl, I: 'static> Pallet { /// /// - `input`: A vector of (Account, Role) tuples. /// - /// Returns a grouped vector. + /// Returns a grouped vector of `(Account, Roles)` tuples. pub fn group_roles_by_account( input: Vec<(T::AccountId, CollectionRole)>, ) -> Vec<(T::AccountId, CollectionRoles)> { diff --git a/frame/nfts/src/features/settings.rs b/frame/nfts/src/features/settings.rs index 080d7b97f13b1..d4f7533ffa4eb 100644 --- a/frame/nfts/src/features/settings.rs +++ b/frame/nfts/src/features/settings.rs @@ -15,10 +15,19 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! This module provides helper methods to configure collection settings for the NFTs pallet. + use crate::*; use frame_support::pallet_prelude::*; impl, I: 'static> Pallet { + /// Forcefully change the configuration of a collection. + /// + /// - `collection`: The ID of the collection for which to update the configuration. + /// - `config`: The new collection configuration to set. + /// + /// This function allows for changing the configuration of a collection without any checks. 
+ /// It updates the collection configuration and emits a `CollectionConfigChanged` event. pub(crate) fn do_force_collection_config( collection: T::CollectionId, config: CollectionConfigFor, @@ -29,6 +38,22 @@ impl, I: 'static> Pallet { Ok(()) } + /// Set the maximum supply for a collection. + /// + /// - `maybe_check_owner`: An optional account ID used to check permissions. + /// - `collection`: The ID of the collection for which to set the maximum supply. + /// - `max_supply`: The new maximum supply to set for the collection. + /// + /// This function checks if the setting `UnlockedMaxSupply` is enabled in the collection + /// configuration. If it is not enabled, it returns an `Error::MaxSupplyLocked`. If + /// `maybe_check_owner` is `Some(owner)`, it checks if the caller of the function is the + /// owner of the collection. If the caller is not the owner and the `maybe_check_owner` + /// parameter is provided, it returns an `Error::NoPermission`. + /// + /// It also checks if the new maximum supply is greater than the current number of items in + /// the collection, and if not, it returns an `Error::MaxSupplyTooSmall`. If all checks pass, + /// it updates the collection configuration with the new maximum supply and emits a + /// `CollectionMaxSupplySet` event. pub(crate) fn do_set_collection_max_supply( maybe_check_owner: Option, collection: T::CollectionId, @@ -56,12 +81,24 @@ impl, I: 'static> Pallet { }) } + /// Update the mint settings for a collection. + /// + /// - `maybe_check_origin`: An optional account ID used to check issuer permissions. + /// - `collection`: The ID of the collection for which to update the mint settings. + /// - `mint_settings`: The new mint settings to set for the collection. + /// + /// This function updates the mint settings for a collection. If `maybe_check_origin` is + /// `Some(origin)`, it checks if the caller of the function has the `CollectionRole::Issuer` + /// for the given collection. 
If the caller doesn't have the required permission and + /// `maybe_check_origin` is provided, it returns an `Error::NoPermission`. If all checks + /// pass, it updates the collection configuration with the new mint settings and emits a + /// `CollectionMintSettingsUpdated` event. pub(crate) fn do_update_mint_settings( maybe_check_origin: Option, collection: T::CollectionId, mint_settings: MintSettings< BalanceOf, - ::BlockNumber, + frame_system::pallet_prelude::BlockNumberFor, T::CollectionId, >, ) -> DispatchResult { @@ -80,6 +117,13 @@ impl, I: 'static> Pallet { }) } + /// Get the configuration for a specific collection. + /// + /// - `collection_id`: The ID of the collection for which to retrieve the configuration. + /// + /// This function attempts to fetch the configuration (`CollectionConfigFor`) associated + /// with the given `collection_id`. If the configuration exists, it returns `Ok(config)`, + /// otherwise, it returns a `DispatchError` with `Error::NoConfig`. pub(crate) fn get_collection_config( collection_id: &T::CollectionId, ) -> Result, DispatchError> { @@ -88,6 +132,14 @@ impl, I: 'static> Pallet { Ok(config) } + /// Get the configuration for a specific item within a collection. + /// + /// - `collection_id`: The ID of the collection to which the item belongs. + /// - `item_id`: The ID of the item for which to retrieve the configuration. + /// + /// This function attempts to fetch the configuration (`ItemConfig`) associated with the given + /// `collection_id` and `item_id`. If the configuration exists, it returns `Ok(config)`, + /// otherwise, it returns a `DispatchError` with `Error::UnknownItem`. pub(crate) fn get_item_config( collection_id: &T::CollectionId, item_id: &T::ItemId, @@ -97,6 +149,14 @@ impl, I: 'static> Pallet { Ok(config) } + /// Get the default item settings for a specific collection. + /// + /// - `collection_id`: The ID of the collection for which to retrieve the default item settings. 
+ /// + /// This function fetches the `default_item_settings` from the collection configuration + /// associated with the given `collection_id`. If the collection configuration exists, it + /// returns `Ok(default_item_settings)`, otherwise, it returns a `DispatchError` with + /// `Error::NoConfig`. pub(crate) fn get_default_item_settings( collection_id: &T::CollectionId, ) -> Result { @@ -104,6 +164,13 @@ impl, I: 'static> Pallet { Ok(collection_config.mint_settings.default_item_settings) } + /// Check if a specified pallet feature is enabled. + /// + /// - `feature`: The feature to check. + /// + /// This function checks if the given `feature` is enabled in the runtime using the + /// pallet's `T::Features::get()` function. It returns `true` if the feature is enabled, + /// otherwise it returns `false`. pub(crate) fn is_pallet_feature_enabled(feature: PalletFeature) -> bool { let features = T::Features::get(); return features.is_enabled(feature) diff --git a/frame/nfts/src/features/transfer.rs b/frame/nfts/src/features/transfer.rs index 00b5d4e76882a..0471bd67b2916 100644 --- a/frame/nfts/src/features/transfer.rs +++ b/frame/nfts/src/features/transfer.rs @@ -15,10 +15,34 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! This module contains helper methods to perform the transfer functionalities +//! of the NFTs pallet. + use crate::*; use frame_support::pallet_prelude::*; impl, I: 'static> Pallet { + /// Transfer an NFT to the specified destination account. + /// + /// - `collection`: The ID of the collection to which the NFT belongs. + /// - `item`: The ID of the NFT to transfer. + /// - `dest`: The destination account to which the NFT will be transferred. + /// - `with_details`: A closure that provides access to the collection and item details, + /// allowing customization of the transfer process. + /// + /// This function performs the actual transfer of an NFT to the destination account. 
+ /// It checks various conditions like item lock status and transferability settings + /// for the collection and item before transferring the NFT. + /// + /// # Errors + /// + /// This function returns a dispatch error in the following cases: + /// - If the collection ID is invalid ([`UnknownCollection`](crate::Error::UnknownCollection)). + /// - If the item ID is invalid ([`UnknownItem`](crate::Error::UnknownItem)). + /// - If the item is locked or transferring it is disabled + /// ([`ItemLocked`](crate::Error::ItemLocked)). + /// - If the collection or item is non-transferable + /// ([`ItemsNonTransferable`](crate::Error::ItemsNonTransferable)). pub fn do_transfer( collection: T::CollectionId, item: T::ItemId, @@ -28,40 +52,57 @@ impl, I: 'static> Pallet { &mut ItemDetailsFor, ) -> DispatchResult, ) -> DispatchResult { + // Retrieve collection details. let collection_details = Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + + // Ensure the item is not locked. ensure!(!T::Locker::is_locked(collection, item), Error::::ItemLocked); + // Ensure the item is not transfer disabled on the system level attribute. + ensure!( + !Self::has_system_attribute(&collection, &item, PalletAttributes::TransferDisabled)?, + Error::::ItemLocked + ); + + // Retrieve collection config and check if items are transferable. let collection_config = Self::get_collection_config(&collection)?; ensure!( collection_config.is_setting_enabled(CollectionSetting::TransferableItems), Error::::ItemsNonTransferable ); + // Retrieve item config and check if the item is transferable. let item_config = Self::get_item_config(&collection, &item)?; ensure!( item_config.is_setting_enabled(ItemSetting::Transferable), Error::::ItemLocked ); + // Retrieve the item details. let mut details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + + // Perform the transfer with custom details using the provided closure. 
with_details(&collection_details, &mut details)?; + // Update account ownership information. Account::::remove((&details.owner, &collection, &item)); Account::::insert((&dest, &collection, &item), ()); let origin = details.owner; details.owner = dest; - // The approved accounts have to be reset to None, because otherwise pre-approve attack + // The approved accounts have to be reset to `None`, because otherwise pre-approve attack // would be possible, where the owner can approve their second account before making the // transaction and then claiming the item back. details.approvals.clear(); + // Update item details. Item::::insert(&collection, &item, &details); ItemPriceOf::::remove(&collection, &item); PendingSwapOf::::remove(&collection, &item); + // Emit `Transferred` event. Self::deposit_event(Event::Transferred { collection, item, @@ -71,16 +112,28 @@ impl, I: 'static> Pallet { Ok(()) } + /// Transfer ownership of a collection to another account. + /// + /// - `origin`: The account requesting the transfer. + /// - `collection`: The ID of the collection to transfer ownership. + /// - `owner`: The new account that will become the owner of the collection. + /// + /// This function transfers the ownership of a collection to the specified account. + /// It performs checks to ensure that the `origin` is the current owner and that the + /// new owner is an acceptable account based on the collection's acceptance settings. pub(crate) fn do_transfer_ownership( origin: T::AccountId, collection: T::CollectionId, owner: T::AccountId, ) -> DispatchResult { + // Check if the new owner is acceptable based on the collection's acceptance settings. let acceptable_collection = OwnershipAcceptance::::get(&owner); ensure!(acceptable_collection.as_ref() == Some(&collection), Error::::Unaccepted); + // Try to retrieve and mutate the collection details. 
Collection::::try_mutate(collection, |maybe_details| { let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; + // Check if the `origin` is the current owner of the collection. ensure!(origin == details.owner, Error::::NoPermission); if details.owner == owner { return Ok(()) @@ -93,17 +146,28 @@ impl, I: 'static> Pallet { details.owner_deposit, Reserved, )?; + + // Update account ownership information. CollectionAccount::::remove(&details.owner, &collection); CollectionAccount::::insert(&owner, &collection, ()); details.owner = owner.clone(); OwnershipAcceptance::::remove(&owner); + // Emit `OwnerChanged` event. Self::deposit_event(Event::OwnerChanged { collection, new_owner: owner }); Ok(()) }) } - + /// Set or unset the ownership acceptance for an account regarding a specific collection. + /// + /// - `who`: The account for which to set or unset the ownership acceptance. + /// - `maybe_collection`: An optional collection ID to set the ownership acceptance. + /// + /// If `maybe_collection` is `Some(collection)`, then the account `who` will accept + /// ownership transfers for the specified collection. If `maybe_collection` is `None`, + /// then the account `who` will unset the ownership acceptance, effectively refusing + /// ownership transfers for any collection. pub(crate) fn do_set_accept_ownership( who: T::AccountId, maybe_collection: Option, @@ -123,14 +187,25 @@ impl, I: 'static> Pallet { } else { OwnershipAcceptance::::remove(&who); } + + // Emit `OwnershipAcceptanceChanged` event. Self::deposit_event(Event::OwnershipAcceptanceChanged { who, maybe_collection }); Ok(()) } + /// Forcefully change the owner of a collection. + /// + /// - `collection`: The ID of the collection to change ownership. + /// - `owner`: The new account that will become the owner of the collection. + /// + /// This function allows for changing the ownership of a collection without any checks. 
+ /// It moves the deposit to the new owner, updates the collection's owner, and emits + /// an `OwnerChanged` event. pub(crate) fn do_force_collection_owner( collection: T::CollectionId, owner: T::AccountId, ) -> DispatchResult { + // Try to retrieve and mutate the collection details. Collection::::try_mutate(collection, |maybe_details| { let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; if details.owner == owner { @@ -145,10 +220,12 @@ impl, I: 'static> Pallet { Reserved, )?; + // Update collection accounts and set the new owner. CollectionAccount::::remove(&details.owner, &collection); CollectionAccount::::insert(&owner, &collection, ()); details.owner = owner.clone(); + // Emit `OwnerChanged` event. Self::deposit_event(Event::OwnerChanged { collection, new_owner: owner }); Ok(()) }) diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs index ef6bbe7656ef8..4e2593b4057d7 100644 --- a/frame/nfts/src/impl_nonfungibles.rs +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -117,6 +117,11 @@ impl, I: 'static> Inspect<::AccountId> for Palle /// /// Default implementation is that all items are transferable. 
fn can_transfer(collection: &Self::CollectionId, item: &Self::ItemId) -> bool { + use PalletAttributes::TransferDisabled; + match Self::has_system_attribute(&collection, &item, TransferDisabled) { + Ok(transfer_disabled) if transfer_disabled => return false, + _ => (), + } match ( CollectionConfigOf::::get(collection), ItemConfigOf::::get(collection, item), @@ -130,6 +135,18 @@ impl, I: 'static> Inspect<::AccountId> for Palle } } +impl, I: 'static> InspectRole<::AccountId> for Pallet { + fn is_issuer(collection: &Self::CollectionId, who: &::AccountId) -> bool { + Self::has_role(collection, who, CollectionRole::Issuer) + } + fn is_admin(collection: &Self::CollectionId, who: &::AccountId) -> bool { + Self::has_role(collection, who, CollectionRole::Admin) + } + fn is_freezer(collection: &Self::CollectionId, who: &::AccountId) -> bool { + Self::has_role(collection, who, CollectionRole::Freezer) + } +} + impl, I: 'static> Create<::AccountId, CollectionConfigFor> for Pallet { @@ -145,8 +162,9 @@ impl, I: 'static> Create<::AccountId, Collection Error::::WrongSetting ); - let collection = - NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()); + let collection = NextCollectionId::::get() + .or(T::CollectionId::initial_value()) + .ok_or(Error::::UnknownCollection)?; Self::do_create_collection( collection, @@ -156,8 +174,40 @@ impl, I: 'static> Create<::AccountId, Collection T::CollectionDeposit::get(), Event::Created { collection, creator: who.clone(), owner: admin.clone() }, )?; + + Self::set_next_collection_id(collection); + Ok(collection) } + + /// Create a collection of nonfungible items with `collection` Id to be owned by `who` and + /// managed by `admin`. Should be only used for applications that do not have an + /// incremental order for the collection IDs and is a replacement for the auto id creation. 
+ /// + /// + /// SAFETY: This function can break the pallet if it is used in combination with the auto + /// increment functionality, as it can claim a value in the ID sequence. + fn create_collection_with_id( + collection: T::CollectionId, + who: &T::AccountId, + admin: &T::AccountId, + config: &CollectionConfigFor, + ) -> Result<(), DispatchError> { + // DepositRequired can be disabled by calling the force_create() only + ensure!( + !config.has_disabled_setting(CollectionSetting::DepositRequired), + Error::::WrongSetting + ); + + Self::do_create_collection( + collection, + who.clone(), + admin.clone(), + *config, + T::CollectionDeposit::get(), + Event::Created { collection, creator: who.clone(), owner: admin.clone() }, + ) + } } impl, I: 'static> Destroy<::AccountId> for Pallet { @@ -322,6 +372,30 @@ impl, I: 'static> Transfer for Pallet { ) -> DispatchResult { Self::do_transfer(*collection, *item, destination.clone(), |_, _| Ok(())) } + + fn disable_transfer(collection: &Self::CollectionId, item: &Self::ItemId) -> DispatchResult { + let transfer_disabled = + Self::has_system_attribute(&collection, &item, PalletAttributes::TransferDisabled)?; + // Can't lock the item twice + if transfer_disabled { + return Err(Error::::ItemLocked.into()) + } + + >::set_attribute( + collection, + item, + &PalletAttributes::::TransferDisabled.encode(), + &[], + ) + } + + fn enable_transfer(collection: &Self::CollectionId, item: &Self::ItemId) -> DispatchResult { + >::clear_attribute( + collection, + item, + &PalletAttributes::::TransferDisabled.encode(), + ) + } } impl, I: 'static> InspectEnumerable for Pallet { diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs index 4796819df6d2c..333fe97dd4a1e 100644 --- a/frame/nfts/src/lib.rs +++ b/frame/nfts/src/lib.rs @@ -37,6 +37,10 @@ pub mod mock; mod tests; mod common_functions; +/// A library providing the feature set of this pallet. 
It contains modules with helper methods that +/// perform storage updates and checks required by this pallet's dispatchables. To use pallet level +/// features, make sure to set appropriate bitflags for [`Config::Features`] in your runtime +/// configuration trait. mod features; mod impl_nonfungibles; mod types; @@ -46,7 +50,8 @@ pub mod weights; use codec::{Decode, Encode}; use frame_support::traits::{ - tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, ReservableCurrency, + tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, Incrementable, + ReservableCurrency, }; use frame_system::Config as SystemConfig; use sp_runtime::{ @@ -62,6 +67,7 @@ pub use weights::WeightInfo; /// The log target of this pallet. pub const LOG_TARGET: &'static str = "runtime::nfts"; +/// A type alias for the account ID type used in the dispatchable functions of this pallet. type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; #[frame_support::pallet] @@ -100,6 +106,14 @@ pub mod pallet { + IsType<::RuntimeEvent>; /// Identifier for the collection of item. + /// + /// SAFETY: The functions in the `Incrementable` trait are fallible. If the functions + /// of the implementation both return `None`, the automatic CollectionId generation + /// should not be used. So the `create` and `force_create` extrinsics and the + /// `create_collection` function will return an `UnknownCollection` Error. Instead use + /// the `create_collection_with_id` function. However, if the `Incrementable` trait + /// implementation has an incremental order, the `create_collection_with_id` function + /// should not be used as it can claim a value in the ID sequence. type CollectionId: Member + Parameter + MaxEncodedLen + Copy + Incrementable; /// The type used to identify a unique item within a collection. @@ -170,7 +184,7 @@ pub mod pallet { /// The max duration in blocks for deadlines. 
#[pallet::constant] - type MaxDeadlineDuration: Get<::BlockNumber>; + type MaxDeadlineDuration: Get>; /// The max number of attributes a user could set per call. #[pallet::constant] @@ -342,7 +356,7 @@ pub mod pallet { T::CollectionId, T::ItemId, PriceWithDirection>, - ::BlockNumber, + BlockNumberFor, >, OptionQuery, >; @@ -413,7 +427,7 @@ pub mod pallet { item: T::ItemId, owner: T::AccountId, delegate: T::AccountId, - deadline: Option<::BlockNumber>, + deadline: Option>, }, /// An approval for a `delegate` account to transfer the `item` of an item /// `collection` was cancelled by its `owner`. @@ -475,7 +489,7 @@ pub mod pallet { /// Mint settings for a collection had changed. CollectionMintSettingsUpdated { collection: T::CollectionId }, /// Event gets emitted when the `NextCollectionId` gets incremented. - NextCollectionIdIncremented { next_id: T::CollectionId }, + NextCollectionIdIncremented { next_id: Option }, /// The price was set for the item. ItemPriceSet { collection: T::CollectionId, @@ -508,7 +522,7 @@ pub mod pallet { desired_collection: T::CollectionId, desired_item: Option, price: Option>>, - deadline: ::BlockNumber, + deadline: BlockNumberFor, }, /// The swap was cancelled. SwapCancelled { @@ -517,7 +531,7 @@ pub mod pallet { desired_collection: T::CollectionId, desired_item: Option, price: Option>>, - deadline: ::BlockNumber, + deadline: BlockNumberFor, }, /// The swap has been claimed. SwapClaimed { @@ -528,7 +542,7 @@ pub mod pallet { received_item: T::ItemId, received_item_owner: T::AccountId, price: Option>>, - deadline: ::BlockNumber, + deadline: BlockNumberFor, }, /// New attributes have been set for an `item` of the `collection`. PreSignedAttributesSet { @@ -636,6 +650,8 @@ pub mod pallet { WrongNamespace, /// Can't delete non-empty collections. CollectionNotEmpty, + /// The witness data should be provided. 
+ WitnessRequired, } #[pallet::call] @@ -662,8 +678,9 @@ pub mod pallet { admin: AccountIdLookupOf, config: CollectionConfigFor, ) -> DispatchResult { - let collection = - NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()); + let collection = NextCollectionId::::get() + .or(T::CollectionId::initial_value()) + .ok_or(Error::::UnknownCollection)?; let owner = T::CreateOrigin::ensure_origin(origin, &collection)?; let admin = T::Lookup::lookup(admin)?; @@ -681,7 +698,10 @@ pub mod pallet { config, T::CollectionDeposit::get(), Event::Created { collection, creator: owner, owner: admin }, - ) + )?; + + Self::set_next_collection_id(collection); + Ok(()) } /// Issue a new collection of non-fungible items from a privileged origin. @@ -709,8 +729,9 @@ pub mod pallet { T::ForceOrigin::ensure_origin(origin)?; let owner = T::Lookup::lookup(owner)?; - let collection = - NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()); + let collection = NextCollectionId::::get() + .or(T::CollectionId::initial_value()) + .ok_or(Error::::UnknownCollection)?; Self::do_create_collection( collection, @@ -719,7 +740,10 @@ pub mod pallet { config, Zero::zero(), Event::ForceCreated { collection, owner }, - ) + )?; + + Self::set_next_collection_id(collection); + Ok(()) } /// Destroy a collection of fungible items. @@ -771,7 +795,8 @@ pub mod pallet { /// - `item`: An identifier of the new item. /// - `mint_to`: Account into which the item will be minted. /// - `witness_data`: When the mint type is `HolderOf(collection_id)`, then the owned - /// item_id from that collection needs to be provided within the witness data object. + /// item_id from that collection needs to be provided within the witness data object. If + /// the mint price is set, then it should be additionally confirmed in the `witness_data`. /// /// Note: the deposit will be taken from the `origin` and not the `owner` of the `item`. 
/// @@ -785,7 +810,7 @@ pub mod pallet { collection: T::CollectionId, item: T::ItemId, mint_to: AccountIdLookupOf, - witness_data: Option>, + witness_data: Option>>, ) -> DispatchResult { let caller = ensure_signed(origin)?; let mint_to = T::Lookup::lookup(mint_to)?; @@ -817,8 +842,8 @@ pub mod pallet { ); }, MintType::HolderOf(collection_id) => { - let MintWitness { owned_item } = - witness_data.ok_or(Error::::BadWitness)?; + let MintWitness { owned_item, .. } = + witness_data.clone().ok_or(Error::::WitnessRequired)?; let owns_item = Account::::contains_key(( &caller, @@ -858,6 +883,10 @@ pub mod pallet { } if let Some(price) = mint_settings.price { + let MintWitness { mint_price, .. } = + witness_data.clone().ok_or(Error::::WitnessRequired)?; + let mint_price = mint_price.ok_or(Error::::BadWitness)?; + ensure!(mint_price >= price, Error::::BadWitness); T::Currency::transfer( &caller, &collection_details.owner, @@ -1228,7 +1257,7 @@ pub mod pallet { collection: T::CollectionId, item: T::ItemId, delegate: AccountIdLookupOf, - maybe_deadline: Option<::BlockNumber>, + maybe_deadline: Option>, ) -> DispatchResult { let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1651,11 +1680,7 @@ pub mod pallet { pub fn update_mint_settings( origin: OriginFor, collection: T::CollectionId, - mint_settings: MintSettings< - BalanceOf, - ::BlockNumber, - T::CollectionId, - >, + mint_settings: MintSettings, BlockNumberFor, T::CollectionId>, ) -> DispatchResult { let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1751,7 +1776,7 @@ pub mod pallet { desired_collection: T::CollectionId, maybe_desired_item: Option, maybe_price: Option>>, - duration: ::BlockNumber, + duration: BlockNumberFor, ) -> DispatchResult { let origin = ensure_signed(origin)?; Self::do_create_swap( @@ -1835,13 +1860,13 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::mint_pre_signed(mint_data.attributes.len() as u32))] pub fn mint_pre_signed( origin: 
OriginFor, - mint_data: PreSignedMintOf, + mint_data: Box>, signature: T::OffchainSignature, signer: T::AccountId, ) -> DispatchResult { let origin = ensure_signed(origin)?; Self::validate_signature(&Encode::encode(&mint_data), &signature, &signer)?; - Self::do_mint_pre_signed(origin, mint_data, signer) + Self::do_mint_pre_signed(origin, *mint_data, signer) } /// Set attributes for an item by providing the pre-signed approval. diff --git a/frame/nfts/src/macros.rs b/frame/nfts/src/macros.rs index 8b0b8358dd7ff..1a601ce0927fa 100644 --- a/frame/nfts/src/macros.rs +++ b/frame/nfts/src/macros.rs @@ -15,25 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -macro_rules! impl_incrementable { - ($($type:ty),+) => { - $( - impl Incrementable for $type { - fn increment(&self) -> Self { - let mut val = self.clone(); - val.saturating_inc(); - val - } - - fn initial_value() -> Self { - 0 - } - } - )+ - }; -} -pub(crate) use impl_incrementable; - +/// Implements encoding and decoding traits for a wrapper type that represents +/// bitflags. The wrapper type should contain a field of type `$size`, where +/// `$size` is an integer type (e.g., u8, u16, u32) that can represent the bitflags. +/// The `$bitflag_enum` type is the enumeration type that defines the individual bitflags. +/// +/// This macro provides implementations for the following traits: +/// - `MaxEncodedLen`: Calculates the maximum encoded length for the wrapper type. +/// - `Encode`: Encodes the wrapper type using the provided encoding function. +/// - `EncodeLike`: Trait indicating the type can be encoded as is. +/// - `Decode`: Decodes the wrapper type from the input. +/// - `TypeInfo`: Provides type information for the wrapper type. macro_rules! 
impl_codec_bitflags { ($wrapper:ty, $size:ty, $bitflag_enum:ty) => { impl MaxEncodedLen for $wrapper { diff --git a/frame/nfts/src/migration.rs b/frame/nfts/src/migration.rs index 33ee87e4b9284..a4529148e35b6 100644 --- a/frame/nfts/src/migration.rs +++ b/frame/nfts/src/migration.rs @@ -18,6 +18,9 @@ use super::*; use frame_support::{log, traits::OnRuntimeUpgrade}; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + pub mod v1 { use frame_support::{pallet_prelude::*, weights::Weight}; @@ -33,6 +36,7 @@ pub mod v1 { } impl OldCollectionDetails { + /// Migrates the old collection details to the new v1 format. fn migrate_to_v1(self, item_configs: u32) -> CollectionDetails { CollectionDetails { owner: self.owner, @@ -45,6 +49,7 @@ pub mod v1 { } } + /// A migration utility to update the storage version from v0 to v1 for the pallet. pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { @@ -90,7 +95,7 @@ pub mod v1 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { let current_version = Pallet::::current_storage_version(); let onchain_version = Pallet::::on_chain_storage_version(); ensure!(onchain_version == 0 && current_version == 1, "migration from version 0 to 1."); @@ -99,13 +104,13 @@ pub mod v1 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(prev_count: Vec) -> Result<(), &'static str> { + fn post_upgrade(prev_count: Vec) -> Result<(), TryRuntimeError> { let prev_count: u32 = Decode::decode(&mut prev_count.as_slice()).expect( "the state parameter should be something that was generated by pre_upgrade", ); let post_count = Collection::::iter().count() as u32; - assert_eq!( - prev_count, post_count, + ensure!( + prev_count == post_count, "the records count before and after the migration should be the same" ); diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs index 
e2856a07b994c..f091a53f8d7c7 100644 --- a/frame/nfts/src/mock.rs +++ b/frame/nfts/src/mock.rs @@ -27,21 +27,16 @@ use frame_support::{ use sp_core::H256; use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, - MultiSignature, + BuildStorage, MultiSignature, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Nfts: pallet_nfts::{Pallet, Call, Storage, Event}, } @@ -57,13 +52,12 @@ impl frame_system::Config for Test { type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); @@ -90,7 +84,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -131,7 +125,7 @@ impl Config for Test { } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.register_extension(KeystoreExt::new(MemoryKeystore::new())); diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs index 
4ab12f0506056..c94e75d343930 100644 --- a/frame/nfts/src/tests.rs +++ b/frame/nfts/src/tests.rs @@ -23,7 +23,7 @@ use frame_support::{ assert_noop, assert_ok, dispatch::Dispatchable, traits::{ - tokens::nonfungibles_v2::{Destroy, Mutate}, + tokens::nonfungibles_v2::{Create, Destroy, Mutate}, Currency, Get, }, }; @@ -369,7 +369,37 @@ fn mint_should_work() { MintSettings { mint_type: MintType::Public, price: Some(1), ..Default::default() } )); Balances::make_free_balance_be(&account(2), 100); - assert_ok!(Nfts::mint(RuntimeOrigin::signed(account(2)), 0, 43, account(2), None)); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(account(2)), 0, 43, account(2), None,), + Error::::WitnessRequired + ); + assert_noop!( + Nfts::mint( + RuntimeOrigin::signed(account(2)), + 0, + 43, + account(2), + Some(MintWitness { ..Default::default() }) + ), + Error::::BadWitness + ); + assert_noop!( + Nfts::mint( + RuntimeOrigin::signed(account(2)), + 0, + 43, + account(2), + Some(MintWitness { mint_price: Some(0), ..Default::default() }) + ), + Error::::BadWitness + ); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(account(2)), + 0, + 43, + account(2), + Some(MintWitness { mint_price: Some(1), ..Default::default() }) + )); assert_eq!(Balances::total_balance(&account(2)), 99); // validate types @@ -385,11 +415,11 @@ fn mint_should_work() { )); assert_noop!( Nfts::mint(RuntimeOrigin::signed(account(3)), 1, 42, account(3), None), - Error::::BadWitness + Error::::WitnessRequired ); assert_noop!( Nfts::mint(RuntimeOrigin::signed(account(2)), 1, 42, account(2), None), - Error::::BadWitness + Error::::WitnessRequired ); assert_noop!( Nfts::mint( @@ -397,7 +427,7 @@ fn mint_should_work() { 1, 42, account(2), - Some(MintWitness { owned_item: 42 }) + Some(MintWitness { owned_item: 42, ..Default::default() }) ), Error::::BadWitness ); @@ -406,7 +436,7 @@ fn mint_should_work() { 1, 42, account(2), - Some(MintWitness { owned_item: 43 }) + Some(MintWitness { owned_item: 43, ..Default::default() }) 
)); // can't mint twice @@ -416,7 +446,7 @@ fn mint_should_work() { 1, 46, account(2), - Some(MintWitness { owned_item: 43 }) + Some(MintWitness { owned_item: 43, ..Default::default() }) ), Error::::AlreadyClaimed ); @@ -3146,13 +3176,14 @@ fn validate_signature() { let user_1_pair = sp_core::sr25519::Pair::from_string("//Alice", None).unwrap(); let user_1_signer = MultiSigner::Sr25519(user_1_pair.public()); let user_1 = user_1_signer.clone().into_account(); - let mint_data: PreSignedMint = PreSignedMint { + let mint_data: PreSignedMint = PreSignedMint { collection: 0, item: 0, attributes: vec![], metadata: vec![], only_account: None, deadline: 100000, + mint_price: None, }; let encoded_data = Encode::encode(&mint_data); let signature = MultiSignature::Sr25519(user_1_pair.sign(&encoded_data)); @@ -3182,6 +3213,7 @@ fn pre_signed_mints_should_work() { metadata: vec![0, 1], only_account: None, deadline: 10000000, + mint_price: Some(10), }; let message = Encode::encode(&mint_data); let signature = MultiSignature::Sr25519(user_1_pair.sign(&message)); @@ -3198,7 +3230,7 @@ fn pre_signed_mints_should_work() { assert_ok!(Nfts::mint_pre_signed( RuntimeOrigin::signed(user_2.clone()), - mint_data.clone(), + Box::new(mint_data.clone()), signature.clone(), user_1.clone(), )); @@ -3228,13 +3260,13 @@ fn pre_signed_mints_should_work() { assert_eq!(deposit.account, Some(user_2.clone())); assert_eq!(deposit.amount, 3); - assert_eq!(Balances::free_balance(&user_0), 100 - 2); // 2 - collection deposit - assert_eq!(Balances::free_balance(&user_2), 100 - 1 - 3 - 6); // 1 - item deposit, 3 - metadata, 6 - attributes + assert_eq!(Balances::free_balance(&user_0), 100 - 2 + 10); // 2 - collection deposit, 10 - mint price + assert_eq!(Balances::free_balance(&user_2), 100 - 1 - 3 - 6 - 10); // 1 - item deposit, 3 - metadata, 6 - attributes, 10 - mint price assert_noop!( Nfts::mint_pre_signed( RuntimeOrigin::signed(user_2.clone()), - mint_data, + Box::new(mint_data), signature.clone(), 
user_1.clone(), ), @@ -3242,7 +3274,7 @@ fn pre_signed_mints_should_work() { ); assert_ok!(Nfts::burn(RuntimeOrigin::signed(user_2.clone()), 0, 0)); - assert_eq!(Balances::free_balance(&user_2), 100 - 6); + assert_eq!(Balances::free_balance(&user_2), 100 - 6 - 10); // validate the `only_account` field let mint_data = PreSignedMint { @@ -3252,13 +3284,14 @@ fn pre_signed_mints_should_work() { metadata: vec![], only_account: Some(account(2)), deadline: 10000000, + mint_price: None, }; // can't mint with the wrong signature assert_noop!( Nfts::mint_pre_signed( RuntimeOrigin::signed(user_2.clone()), - mint_data.clone(), + Box::new(mint_data.clone()), signature.clone(), user_1.clone(), ), @@ -3271,7 +3304,7 @@ fn pre_signed_mints_should_work() { assert_noop!( Nfts::mint_pre_signed( RuntimeOrigin::signed(user_3), - mint_data.clone(), + Box::new(mint_data.clone()), signature.clone(), user_1.clone(), ), @@ -3283,7 +3316,7 @@ fn pre_signed_mints_should_work() { assert_noop!( Nfts::mint_pre_signed( RuntimeOrigin::signed(user_2.clone()), - mint_data, + Box::new(mint_data), signature, user_1.clone(), ), @@ -3299,6 +3332,7 @@ fn pre_signed_mints_should_work() { metadata: vec![], only_account: Some(account(2)), deadline: 10000000, + mint_price: None, }; let message = Encode::encode(&mint_data); let signature = MultiSignature::Sr25519(user_1_pair.sign(&message)); @@ -3306,7 +3340,7 @@ fn pre_signed_mints_should_work() { assert_noop!( Nfts::mint_pre_signed( RuntimeOrigin::signed(user_2.clone()), - mint_data, + Box::new(mint_data), signature, user_1.clone(), ), @@ -3321,13 +3355,14 @@ fn pre_signed_mints_should_work() { metadata: vec![0, 1], only_account: None, deadline: 10000000, + mint_price: None, }; let message = Encode::encode(&mint_data); let signature = MultiSignature::Sr25519(user_1_pair.sign(&message)); assert_noop!( Nfts::mint_pre_signed( RuntimeOrigin::signed(user_2), - mint_data, + Box::new(mint_data), signature, user_1.clone(), ), @@ -3643,3 +3678,41 @@ fn 
pre_signed_attributes_should_work() { ); }) } + +#[test] +fn basic_create_collection_with_id_should_work() { + new_test_ext().execute_with(|| { + assert_noop!( + Nfts::create_collection_with_id( + 0u32, + &account(1), + &account(1), + &default_collection_config(), + ), + Error::::WrongSetting + ); + + Balances::make_free_balance_be(&account(1), 100); + Balances::make_free_balance_be(&account(2), 100); + + assert_ok!(Nfts::create_collection_with_id( + 0u32, + &account(1), + &account(1), + &collection_config_with_all_settings_enabled(), + )); + + assert_eq!(collections(), vec![(account(1), 0)]); + + // CollectionId already taken. + assert_noop!( + Nfts::create_collection_with_id( + 0u32, + &account(2), + &account(2), + &collection_config_with_all_settings_enabled(), + ), + Error::::CollectionIdInUse + ); + }); +} diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs index fe6d31c12acec..f083b116fe938 100644 --- a/frame/nfts/src/types.rs +++ b/frame/nfts/src/types.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Various basic types for use in the Nfts pallet. +//! This module contains various basic types and data structures used in the NFTs pallet. use super::*; use crate::macros::*; @@ -26,60 +26,67 @@ use frame_support::{ traits::Get, BoundedBTreeMap, BoundedBTreeSet, }; +use frame_system::pallet_prelude::BlockNumberFor; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; +/// A type alias for handling balance deposits. pub(super) type DepositBalanceOf = <>::Currency as Currency<::AccountId>>::Balance; +/// A type alias representing the details of a collection. pub(super) type CollectionDetailsFor = CollectionDetails<::AccountId, DepositBalanceOf>; +/// A type alias for keeping track of approvals used by a single item. 
pub(super) type ApprovalsOf = BoundedBTreeMap< ::AccountId, - Option<::BlockNumber>, + Option>, >::ApprovalsLimit, >; +/// A type alias for keeping track of approvals for an item's attributes. pub(super) type ItemAttributesApprovals = BoundedBTreeSet<::AccountId, >::ItemAttributesApprovalsLimit>; +/// A type that holds the deposit for a single item. pub(super) type ItemDepositOf = ItemDeposit, ::AccountId>; +/// A type that holds the deposit amount for an item's attribute. pub(super) type AttributeDepositOf = AttributeDeposit, ::AccountId>; +/// A type that holds the deposit amount for an item's metadata. pub(super) type ItemMetadataDepositOf = ItemMetadataDeposit, ::AccountId>; +/// A type that holds the details of a single item. pub(super) type ItemDetailsFor = ItemDetails<::AccountId, ItemDepositOf, ApprovalsOf>; +/// A type alias for an accounts balance. pub(super) type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; +/// A type alias to represent the price of an item. pub(super) type ItemPrice = BalanceOf; +/// A type alias for the tips held by a single item. pub(super) type ItemTipOf = ItemTip< >::CollectionId, >::ItemId, ::AccountId, BalanceOf, >; -pub(super) type CollectionConfigFor = CollectionConfig< - BalanceOf, - ::BlockNumber, - >::CollectionId, ->; +/// A type alias for the settings configuration of a collection. +pub(super) type CollectionConfigFor = + CollectionConfig, BlockNumberFor, >::CollectionId>; +/// A type alias for the pre-signed minting configuration for a specified collection. pub(super) type PreSignedMintOf = PreSignedMint< >::CollectionId, >::ItemId, ::AccountId, - ::BlockNumber, + BlockNumberFor, + BalanceOf, >; +/// A type alias for the pre-signed minting configuration on the attribute level of an item. 
pub(super) type PreSignedAttributesOf = PreSignedAttributes< >::CollectionId, >::ItemId, ::AccountId, - ::BlockNumber, + BlockNumberFor, >; -pub trait Incrementable { - fn increment(&self) -> Self; - fn initial_value() -> Self; -} -impl_incrementable!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); - /// Information about a collection. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct CollectionDetails { @@ -123,10 +130,12 @@ impl CollectionDetails { } /// Witness data for items mint transactions. -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)] -pub struct MintWitness { +#[derive(Clone, Encode, Decode, Default, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub struct MintWitness { /// Provide the id of the item in a required collection. pub owned_item: ItemId, + /// The price specified in mint settings. + pub mint_price: Option, } /// Information concerning the ownership of a single unique item. @@ -346,6 +355,8 @@ pub struct CancelAttributesApprovalWitness { pub enum PalletAttributes { /// Marks an item as being used in order to claim another item. UsedToClaim(CollectionId), + /// Marks an item as being restricted from transferring. + TransferDisabled, } /// Collection's configuration. @@ -504,7 +515,7 @@ impl CollectionRoles { impl_codec_bitflags!(CollectionRoles, u8, CollectionRole); #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct PreSignedMint { +pub struct PreSignedMint { /// A collection of the item to be minted. pub(super) collection: CollectionId, /// Item's ID. @@ -517,6 +528,8 @@ pub struct PreSignedMint { pub(super) only_account: Option, /// A deadline for the signature. pub(super) deadline: Deadline, + /// An optional price the claimer would need to pay for the mint. 
+ pub(super) mint_price: Option, } #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs index 19a61974a61a7..6b8c577bb12e5 100644 --- a/frame/nfts/src/weights.rs +++ b/frame/nfts/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_nfts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,8 +31,10 @@ // --steps=50 // --repeat=20 // --pallet=pallet_nfts +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 // --output=./frame/nfts/src/weights.rs @@ -42,9 +44,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_nfts. pub trait WeightInfo { @@ -104,10 +107,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `182` + // Measured: `216` // Estimated: `3549` - // Minimum execution time: 40_664_000 picoseconds. - Weight::from_parts(41_224_000, 3549) + // Minimum execution time: 40_489_000 picoseconds. 
+ Weight::from_parts(41_320_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -123,10 +126,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn force_create() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `76` // Estimated: `3549` - // Minimum execution time: 24_725_000 picoseconds. - Weight::from_parts(25_147_000, 3549) + // Minimum execution time: 23_257_000 picoseconds. + Weight::from_parts(23_770_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -151,12 +154,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `a` is `[0, 1000]`. fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `32186 + a * (332 ±0)` + // Measured: `32220 + a * (332 ±0)` // Estimated: `2523990 + a * (2921 ±0)` - // Minimum execution time: 1_100_509_000 picoseconds. - Weight::from_parts(1_081_634_178, 2523990) - // Standard Error: 3_025 - .saturating_add(Weight::from_parts(5_339_415, 0).saturating_mul(a.into())) + // Minimum execution time: 1_310_198_000 picoseconds. + Weight::from_parts(1_479_261_043, 2523990) + // Standard Error: 4_415 + .saturating_add(Weight::from_parts(6_016_212, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(1004_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(T::DbWeight::get().writes(1005_u64)) @@ -177,10 +180,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn mint() -> Weight { // Proof Size summary in bytes: - // Measured: `421` + // Measured: `455` // Estimated: `4326` - // Minimum execution time: 52_464_000 picoseconds. 
- Weight::from_parts(52_847_000, 4326) + // Minimum execution time: 51_910_000 picoseconds. + Weight::from_parts(53_441_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -198,10 +201,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn force_mint() -> Weight { // Proof Size summary in bytes: - // Measured: `421` + // Measured: `455` // Estimated: `4326` - // Minimum execution time: 50_327_000 picoseconds. - Weight::from_parts(51_093_000, 4326) + // Minimum execution time: 50_168_000 picoseconds. + Weight::from_parts(51_380_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -223,15 +226,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn burn() -> Weight { // Proof Size summary in bytes: - // Measured: `530` + // Measured: `564` // Estimated: `4326` - // Minimum execution time: 51_342_000 picoseconds. - Weight::from_parts(51_846_000, 4326) + // Minimum execution time: 50_738_000 picoseconds. 
+ Weight::from_parts(51_850_000, 4326) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: Nfts Collection (r:1 w:0) /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:1 w:0) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) /// Storage: Nfts CollectionConfigOf (r:1 w:0) /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// Storage: Nfts ItemConfigOf (r:1 w:0) @@ -246,11 +251,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `559` + // Measured: `593` // Estimated: `4326` - // Minimum execution time: 38_309_000 picoseconds. - Weight::from_parts(38_672_000, 4326) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Minimum execution time: 41_055_000 picoseconds. + Weight::from_parts(42_336_000, 4326) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: Nfts Collection (r:1 w:0) @@ -262,12 +267,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `729 + i * (108 ±0)` + // Measured: `763 + i * (108 ±0)` // Estimated: `3549 + i * (3336 ±0)` - // Minimum execution time: 17_525_000 picoseconds. - Weight::from_parts(17_657_000, 3549) - // Standard Error: 15_884 - .saturating_add(Weight::from_parts(16_026_633, 0).saturating_mul(i.into())) + // Minimum execution time: 15_688_000 picoseconds. 
+ Weight::from_parts(15_921_000, 3549) + // Standard Error: 14_827 + .saturating_add(Weight::from_parts(17_105_395, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) @@ -279,10 +284,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) fn lock_item_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `401` + // Measured: `435` // Estimated: `3534` - // Minimum execution time: 21_814_000 picoseconds. - Weight::from_parts(22_171_000, 3534) + // Minimum execution time: 19_981_000 picoseconds. + Weight::from_parts(20_676_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -292,10 +297,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) fn unlock_item_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `401` + // Measured: `435` // Estimated: `3534` - // Minimum execution time: 21_728_000 picoseconds. - Weight::from_parts(21_893_000, 3534) + // Minimum execution time: 19_911_000 picoseconds. + Weight::from_parts(20_612_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -305,10 +310,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn lock_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `306` + // Measured: `340` // Estimated: `3549` - // Minimum execution time: 18_359_000 picoseconds. - Weight::from_parts(19_101_000, 3549) + // Minimum execution time: 16_441_000 picoseconds. 
+ Weight::from_parts(16_890_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -320,10 +325,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `354` + // Measured: `388` // Estimated: `3549` - // Minimum execution time: 24_713_000 picoseconds. - Weight::from_parts(25_032_000, 3549) + // Minimum execution time: 22_610_000 picoseconds. + Weight::from_parts(23_422_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -333,10 +338,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) fn set_team() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `369` // Estimated: `6078` - // Minimum execution time: 42_372_000 picoseconds. - Weight::from_parts(42_971_000, 6078) + // Minimum execution time: 39_739_000 picoseconds. + Weight::from_parts(41_306_000, 6078) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -346,10 +351,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn force_collection_owner() -> Weight { // Proof Size summary in bytes: - // Measured: `277` + // Measured: `311` // Estimated: `3549` - // Minimum execution time: 19_703_000 picoseconds. - Weight::from_parts(19_993_000, 3549) + // Minimum execution time: 17_685_000 picoseconds. 
+ Weight::from_parts(18_258_000, 3549) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -359,10 +364,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn force_collection_config() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `276` // Estimated: `3549` - // Minimum execution time: 15_500_000 picoseconds. - Weight::from_parts(15_929_000, 3549) + // Minimum execution time: 13_734_000 picoseconds. + Weight::from_parts(14_337_000, 3549) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -372,10 +377,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) fn lock_item_properties() -> Weight { // Proof Size summary in bytes: - // Measured: `401` + // Measured: `435` // Estimated: `3534` - // Minimum execution time: 20_778_000 picoseconds. - Weight::from_parts(21_187_000, 3534) + // Minimum execution time: 19_269_000 picoseconds. + Weight::from_parts(19_859_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -391,10 +396,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `505` + // Measured: `539` // Estimated: `3911` - // Minimum execution time: 53_016_000 picoseconds. - Weight::from_parts(53_579_000, 3911) + // Minimum execution time: 51_540_000 picoseconds. 
+ Weight::from_parts(52_663_000, 3911) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -404,10 +409,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) fn force_set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `310` + // Measured: `344` // Estimated: `3911` - // Minimum execution time: 28_790_000 picoseconds. - Weight::from_parts(29_157_000, 3911) + // Minimum execution time: 26_529_000 picoseconds. + Weight::from_parts(27_305_000, 3911) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -421,10 +426,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `916` + // Measured: `950` // Estimated: `3911` - // Minimum execution time: 48_584_000 picoseconds. - Weight::from_parts(49_202_000, 3911) + // Minimum execution time: 46_951_000 picoseconds. + Weight::from_parts(48_481_000, 3911) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -434,10 +439,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) fn approve_item_attributes() -> Weight { // Proof Size summary in bytes: - // Measured: `347` + // Measured: `381` // Estimated: `4326` - // Minimum execution time: 19_616_000 picoseconds. - Weight::from_parts(19_972_000, 4326) + // Minimum execution time: 17_222_000 picoseconds. + Weight::from_parts(17_819_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -452,12 +457,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1000]`. 
fn cancel_item_attributes_approval(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `803 + n * (364 ±0)` + // Measured: `837 + n * (364 ±0)` // Estimated: `4326 + n * (2921 ±0)` - // Minimum execution time: 28_897_000 picoseconds. - Weight::from_parts(29_061_000, 4326) - // Standard Error: 3_139 - .saturating_add(Weight::from_parts(5_396_415, 0).saturating_mul(n.into())) + // Minimum execution time: 26_185_000 picoseconds. + Weight::from_parts(27_038_000, 4326) + // Standard Error: 2_378 + .saturating_add(Weight::from_parts(6_067_888, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -476,10 +481,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) fn set_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `505` + // Measured: `539` // Estimated: `3605` - // Minimum execution time: 43_748_000 picoseconds. - Weight::from_parts(44_178_000, 3605) + // Minimum execution time: 42_120_000 picoseconds. + Weight::from_parts(43_627_000, 3605) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -493,10 +498,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `608` + // Measured: `642` // Estimated: `3605` - // Minimum execution time: 42_116_000 picoseconds. - Weight::from_parts(42_455_000, 3605) + // Minimum execution time: 40_732_000 picoseconds. 
+ Weight::from_parts(42_760_000, 3605) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -510,10 +515,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `364` + // Measured: `398` // Estimated: `3552` - // Minimum execution time: 40_926_000 picoseconds. - Weight::from_parts(41_512_000, 3552) + // Minimum execution time: 39_443_000 picoseconds. + Weight::from_parts(40_482_000, 3552) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -527,10 +532,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `475` + // Measured: `509` // Estimated: `3552` - // Minimum execution time: 39_792_000 picoseconds. - Weight::from_parts(40_443_000, 3552) + // Minimum execution time: 37_676_000 picoseconds. + Weight::from_parts(39_527_000, 3552) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -540,10 +545,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn approve_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `376` + // Measured: `410` // Estimated: `4326` - // Minimum execution time: 22_648_000 picoseconds. - Weight::from_parts(23_139_000, 4326) + // Minimum execution time: 20_787_000 picoseconds. 
+ Weight::from_parts(21_315_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -551,10 +556,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) fn cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `384` + // Measured: `418` // Estimated: `4326` - // Minimum execution time: 20_552_000 picoseconds. - Weight::from_parts(20_920_000, 4326) + // Minimum execution time: 18_200_000 picoseconds. + Weight::from_parts(19_064_000, 4326) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -562,10 +567,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) fn clear_all_transfer_approvals() -> Weight { // Proof Size summary in bytes: - // Measured: `384` + // Measured: `418` // Estimated: `4326` - // Minimum execution time: 19_114_000 picoseconds. - Weight::from_parts(19_876_000, 4326) + // Minimum execution time: 17_128_000 picoseconds. + Weight::from_parts(17_952_000, 4326) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -573,10 +578,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `76` // Estimated: `3517` - // Minimum execution time: 17_089_000 picoseconds. - Weight::from_parts(17_363_000, 3517) + // Minimum execution time: 14_667_000 picoseconds. 
+ Weight::from_parts(15_262_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -586,10 +591,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: - // Measured: `306` + // Measured: `340` // Estimated: `3549` - // Minimum execution time: 20_667_000 picoseconds. - Weight::from_parts(20_898_000, 3549) + // Minimum execution time: 18_435_000 picoseconds. + Weight::from_parts(18_775_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -599,10 +604,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn update_mint_settings() -> Weight { // Proof Size summary in bytes: - // Measured: `289` + // Measured: `323` // Estimated: `3538` - // Minimum execution time: 19_666_000 picoseconds. - Weight::from_parts(20_136_000, 3538) + // Minimum execution time: 18_125_000 picoseconds. + Weight::from_parts(18_415_000, 3538) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -616,10 +621,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn set_price() -> Weight { // Proof Size summary in bytes: - // Measured: `484` + // Measured: `518` // Estimated: `4326` - // Minimum execution time: 25_778_000 picoseconds. - Weight::from_parts(26_447_000, 4326) + // Minimum execution time: 23_237_000 picoseconds. 
+ Weight::from_parts(24_128_000, 4326) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -629,6 +634,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) /// Storage: Nfts Collection (r:1 w:0) /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:1 w:0) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) /// Storage: Nfts CollectionConfigOf (r:1 w:0) /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// Storage: Nfts ItemConfigOf (r:1 w:0) @@ -639,11 +646,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn buy_item() -> Weight { // Proof Size summary in bytes: - // Measured: `671` + // Measured: `705` // Estimated: `4326` - // Minimum execution time: 50_809_000 picoseconds. - Weight::from_parts(51_503_000, 4326) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Minimum execution time: 53_291_000 picoseconds. + Weight::from_parts(54_614_000, 4326) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// The range of component `n` is `[0, 10]`. @@ -651,10 +658,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_789_000 picoseconds. - Weight::from_parts(5_528_034, 0) - // Standard Error: 14_405 - .saturating_add(Weight::from_parts(3_788_038, 0).saturating_mul(n.into())) + // Minimum execution time: 2_192_000 picoseconds. 
+ Weight::from_parts(4_039_901, 0) + // Standard Error: 10_309 + .saturating_add(Weight::from_parts(3_934_017, 0).saturating_mul(n.into())) } /// Storage: Nfts Item (r:2 w:0) /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) @@ -662,10 +669,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn create_swap() -> Weight { // Proof Size summary in bytes: - // Measured: `460` + // Measured: `494` // Estimated: `7662` - // Minimum execution time: 22_884_000 picoseconds. - Weight::from_parts(23_732_000, 7662) + // Minimum execution time: 21_011_000 picoseconds. + Weight::from_parts(22_065_000, 7662) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -675,10 +682,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) fn cancel_swap() -> Weight { // Proof Size summary in bytes: - // Measured: `479` + // Measured: `513` // Estimated: `4326` - // Minimum execution time: 22_686_000 picoseconds. - Weight::from_parts(23_088_000, 4326) + // Minimum execution time: 21_423_000 picoseconds. 
+ Weight::from_parts(21_743_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -688,6 +695,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) /// Storage: Nfts Collection (r:1 w:0) /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:2 w:0) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) /// Storage: Nfts CollectionConfigOf (r:1 w:0) /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// Storage: Nfts ItemConfigOf (r:2 w:0) @@ -698,11 +707,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn claim_swap() -> Weight { // Proof Size summary in bytes: - // Measured: `800` + // Measured: `834` // Estimated: `7662` - // Minimum execution time: 77_494_000 picoseconds. - Weight::from_parts(78_650_000, 7662) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Minimum execution time: 86_059_000 picoseconds. + Weight::from_parts(88_401_000, 7662) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } /// Storage: Nfts CollectionRoleOf (r:2 w:0) @@ -726,12 +735,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 10]`. fn mint_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `595` + // Measured: `629` // Estimated: `6078 + n * (2921 ±0)` - // Minimum execution time: 139_109_000 picoseconds. - Weight::from_parts(144_449_034, 6078) - // Standard Error: 26_869 - .saturating_add(Weight::from_parts(29_961_772, 0).saturating_mul(n.into())) + // Minimum execution time: 146_746_000 picoseconds. 
+ Weight::from_parts(152_885_862, 6078) + // Standard Error: 40_442 + .saturating_add(Weight::from_parts(32_887_800, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -753,12 +762,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 10]`. fn set_attributes_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `625` + // Measured: `659` // Estimated: `4326 + n * (2921 ±0)` - // Minimum execution time: 78_280_000 picoseconds. - Weight::from_parts(92_826_883, 4326) - // Standard Error: 81_125 - .saturating_add(Weight::from_parts(29_898_245, 0).saturating_mul(n.into())) + // Minimum execution time: 83_960_000 picoseconds. + Weight::from_parts(98_609_885, 4326) + // Standard Error: 85_991 + .saturating_add(Weight::from_parts(32_633_495, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -781,10 +790,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `182` + // Measured: `216` // Estimated: `3549` - // Minimum execution time: 40_664_000 picoseconds. - Weight::from_parts(41_224_000, 3549) + // Minimum execution time: 40_489_000 picoseconds. 
+ Weight::from_parts(41_320_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -800,10 +809,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn force_create() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `76` // Estimated: `3549` - // Minimum execution time: 24_725_000 picoseconds. - Weight::from_parts(25_147_000, 3549) + // Minimum execution time: 23_257_000 picoseconds. + Weight::from_parts(23_770_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -828,12 +837,12 @@ impl WeightInfo for () { /// The range of component `a` is `[0, 1000]`. fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `32186 + a * (332 ±0)` + // Measured: `32220 + a * (332 ±0)` // Estimated: `2523990 + a * (2921 ±0)` - // Minimum execution time: 1_100_509_000 picoseconds. - Weight::from_parts(1_081_634_178, 2523990) - // Standard Error: 3_025 - .saturating_add(Weight::from_parts(5_339_415, 0).saturating_mul(a.into())) + // Minimum execution time: 1_310_198_000 picoseconds. + Weight::from_parts(1_479_261_043, 2523990) + // Standard Error: 4_415 + .saturating_add(Weight::from_parts(6_016_212, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(1004_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(RocksDbWeight::get().writes(1005_u64)) @@ -854,10 +863,10 @@ impl WeightInfo for () { /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn mint() -> Weight { // Proof Size summary in bytes: - // Measured: `421` + // Measured: `455` // Estimated: `4326` - // Minimum execution time: 52_464_000 picoseconds. 
- Weight::from_parts(52_847_000, 4326) + // Minimum execution time: 51_910_000 picoseconds. + Weight::from_parts(53_441_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -875,10 +884,10 @@ impl WeightInfo for () { /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) fn force_mint() -> Weight { // Proof Size summary in bytes: - // Measured: `421` + // Measured: `455` // Estimated: `4326` - // Minimum execution time: 50_327_000 picoseconds. - Weight::from_parts(51_093_000, 4326) + // Minimum execution time: 50_168_000 picoseconds. + Weight::from_parts(51_380_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -900,15 +909,17 @@ impl WeightInfo for () { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn burn() -> Weight { // Proof Size summary in bytes: - // Measured: `530` + // Measured: `564` // Estimated: `4326` - // Minimum execution time: 51_342_000 picoseconds. - Weight::from_parts(51_846_000, 4326) + // Minimum execution time: 50_738_000 picoseconds. 
+ Weight::from_parts(51_850_000, 4326) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: Nfts Collection (r:1 w:0) /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:1 w:0) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) /// Storage: Nfts CollectionConfigOf (r:1 w:0) /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// Storage: Nfts ItemConfigOf (r:1 w:0) @@ -923,11 +934,11 @@ impl WeightInfo for () { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `559` + // Measured: `593` // Estimated: `4326` - // Minimum execution time: 38_309_000 picoseconds. - Weight::from_parts(38_672_000, 4326) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Minimum execution time: 41_055_000 picoseconds. + Weight::from_parts(42_336_000, 4326) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: Nfts Collection (r:1 w:0) @@ -939,12 +950,12 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `729 + i * (108 ±0)` + // Measured: `763 + i * (108 ±0)` // Estimated: `3549 + i * (3336 ±0)` - // Minimum execution time: 17_525_000 picoseconds. - Weight::from_parts(17_657_000, 3549) - // Standard Error: 15_884 - .saturating_add(Weight::from_parts(16_026_633, 0).saturating_mul(i.into())) + // Minimum execution time: 15_688_000 picoseconds. 
+ Weight::from_parts(15_921_000, 3549) + // Standard Error: 14_827 + .saturating_add(Weight::from_parts(17_105_395, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) @@ -956,10 +967,10 @@ impl WeightInfo for () { /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) fn lock_item_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `401` + // Measured: `435` // Estimated: `3534` - // Minimum execution time: 21_814_000 picoseconds. - Weight::from_parts(22_171_000, 3534) + // Minimum execution time: 19_981_000 picoseconds. + Weight::from_parts(20_676_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -969,10 +980,10 @@ impl WeightInfo for () { /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) fn unlock_item_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `401` + // Measured: `435` // Estimated: `3534` - // Minimum execution time: 21_728_000 picoseconds. - Weight::from_parts(21_893_000, 3534) + // Minimum execution time: 19_911_000 picoseconds. + Weight::from_parts(20_612_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -982,10 +993,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn lock_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `306` + // Measured: `340` // Estimated: `3549` - // Minimum execution time: 18_359_000 picoseconds. - Weight::from_parts(19_101_000, 3549) + // Minimum execution time: 16_441_000 picoseconds. 
+ Weight::from_parts(16_890_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -997,10 +1008,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `354` + // Measured: `388` // Estimated: `3549` - // Minimum execution time: 24_713_000 picoseconds. - Weight::from_parts(25_032_000, 3549) + // Minimum execution time: 22_610_000 picoseconds. + Weight::from_parts(23_422_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -1010,10 +1021,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) fn set_team() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `369` // Estimated: `6078` - // Minimum execution time: 42_372_000 picoseconds. - Weight::from_parts(42_971_000, 6078) + // Minimum execution time: 39_739_000 picoseconds. + Weight::from_parts(41_306_000, 6078) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1023,10 +1034,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) fn force_collection_owner() -> Weight { // Proof Size summary in bytes: - // Measured: `277` + // Measured: `311` // Estimated: `3549` - // Minimum execution time: 19_703_000 picoseconds. - Weight::from_parts(19_993_000, 3549) + // Minimum execution time: 17_685_000 picoseconds. 
+ Weight::from_parts(18_258_000, 3549) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1036,10 +1047,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn force_collection_config() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `276` // Estimated: `3549` - // Minimum execution time: 15_500_000 picoseconds. - Weight::from_parts(15_929_000, 3549) + // Minimum execution time: 13_734_000 picoseconds. + Weight::from_parts(14_337_000, 3549) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1049,10 +1060,10 @@ impl WeightInfo for () { /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) fn lock_item_properties() -> Weight { // Proof Size summary in bytes: - // Measured: `401` + // Measured: `435` // Estimated: `3534` - // Minimum execution time: 20_778_000 picoseconds. - Weight::from_parts(21_187_000, 3534) + // Minimum execution time: 19_269_000 picoseconds. + Weight::from_parts(19_859_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1068,10 +1079,10 @@ impl WeightInfo for () { /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `505` + // Measured: `539` // Estimated: `3911` - // Minimum execution time: 53_016_000 picoseconds. - Weight::from_parts(53_579_000, 3911) + // Minimum execution time: 51_540_000 picoseconds. 
+ Weight::from_parts(52_663_000, 3911) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1081,10 +1092,10 @@ impl WeightInfo for () { /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) fn force_set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `310` + // Measured: `344` // Estimated: `3911` - // Minimum execution time: 28_790_000 picoseconds. - Weight::from_parts(29_157_000, 3911) + // Minimum execution time: 26_529_000 picoseconds. + Weight::from_parts(27_305_000, 3911) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1098,10 +1109,10 @@ impl WeightInfo for () { /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `916` + // Measured: `950` // Estimated: `3911` - // Minimum execution time: 48_584_000 picoseconds. - Weight::from_parts(49_202_000, 3911) + // Minimum execution time: 46_951_000 picoseconds. + Weight::from_parts(48_481_000, 3911) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1111,10 +1122,10 @@ impl WeightInfo for () { /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) fn approve_item_attributes() -> Weight { // Proof Size summary in bytes: - // Measured: `347` + // Measured: `381` // Estimated: `4326` - // Minimum execution time: 19_616_000 picoseconds. - Weight::from_parts(19_972_000, 4326) + // Minimum execution time: 17_222_000 picoseconds. + Weight::from_parts(17_819_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1129,12 +1140,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1000]`. 
fn cancel_item_attributes_approval(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `803 + n * (364 ±0)` + // Measured: `837 + n * (364 ±0)` // Estimated: `4326 + n * (2921 ±0)` - // Minimum execution time: 28_897_000 picoseconds. - Weight::from_parts(29_061_000, 4326) - // Standard Error: 3_139 - .saturating_add(Weight::from_parts(5_396_415, 0).saturating_mul(n.into())) + // Minimum execution time: 26_185_000 picoseconds. + Weight::from_parts(27_038_000, 4326) + // Standard Error: 2_378 + .saturating_add(Weight::from_parts(6_067_888, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1153,10 +1164,10 @@ impl WeightInfo for () { /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) fn set_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `505` + // Measured: `539` // Estimated: `3605` - // Minimum execution time: 43_748_000 picoseconds. - Weight::from_parts(44_178_000, 3605) + // Minimum execution time: 42_120_000 picoseconds. + Weight::from_parts(43_627_000, 3605) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1170,10 +1181,10 @@ impl WeightInfo for () { /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `608` + // Measured: `642` // Estimated: `3605` - // Minimum execution time: 42_116_000 picoseconds. - Weight::from_parts(42_455_000, 3605) + // Minimum execution time: 40_732_000 picoseconds. 
+ Weight::from_parts(42_760_000, 3605) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1187,10 +1198,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `364` + // Measured: `398` // Estimated: `3552` - // Minimum execution time: 40_926_000 picoseconds. - Weight::from_parts(41_512_000, 3552) + // Minimum execution time: 39_443_000 picoseconds. + Weight::from_parts(40_482_000, 3552) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1204,10 +1215,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `475` + // Measured: `509` // Estimated: `3552` - // Minimum execution time: 39_792_000 picoseconds. - Weight::from_parts(40_443_000, 3552) + // Minimum execution time: 37_676_000 picoseconds. + Weight::from_parts(39_527_000, 3552) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1217,10 +1228,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn approve_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `376` + // Measured: `410` // Estimated: `4326` - // Minimum execution time: 22_648_000 picoseconds. - Weight::from_parts(23_139_000, 4326) + // Minimum execution time: 20_787_000 picoseconds. 
+ Weight::from_parts(21_315_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1228,10 +1239,10 @@ impl WeightInfo for () { /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) fn cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `384` + // Measured: `418` // Estimated: `4326` - // Minimum execution time: 20_552_000 picoseconds. - Weight::from_parts(20_920_000, 4326) + // Minimum execution time: 18_200_000 picoseconds. + Weight::from_parts(19_064_000, 4326) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1239,10 +1250,10 @@ impl WeightInfo for () { /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) fn clear_all_transfer_approvals() -> Weight { // Proof Size summary in bytes: - // Measured: `384` + // Measured: `418` // Estimated: `4326` - // Minimum execution time: 19_114_000 picoseconds. - Weight::from_parts(19_876_000, 4326) + // Minimum execution time: 17_128_000 picoseconds. + Weight::from_parts(17_952_000, 4326) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1250,10 +1261,10 @@ impl WeightInfo for () { /// Proof: Nfts OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `76` // Estimated: `3517` - // Minimum execution time: 17_089_000 picoseconds. - Weight::from_parts(17_363_000, 3517) + // Minimum execution time: 14_667_000 picoseconds. 
+ Weight::from_parts(15_262_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1263,10 +1274,10 @@ impl WeightInfo for () { /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: - // Measured: `306` + // Measured: `340` // Estimated: `3549` - // Minimum execution time: 20_667_000 picoseconds. - Weight::from_parts(20_898_000, 3549) + // Minimum execution time: 18_435_000 picoseconds. + Weight::from_parts(18_775_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1276,10 +1287,10 @@ impl WeightInfo for () { /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) fn update_mint_settings() -> Weight { // Proof Size summary in bytes: - // Measured: `289` + // Measured: `323` // Estimated: `3538` - // Minimum execution time: 19_666_000 picoseconds. - Weight::from_parts(20_136_000, 3538) + // Minimum execution time: 18_125_000 picoseconds. + Weight::from_parts(18_415_000, 3538) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1293,10 +1304,10 @@ impl WeightInfo for () { /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn set_price() -> Weight { // Proof Size summary in bytes: - // Measured: `484` + // Measured: `518` // Estimated: `4326` - // Minimum execution time: 25_778_000 picoseconds. - Weight::from_parts(26_447_000, 4326) + // Minimum execution time: 23_237_000 picoseconds. 
+ Weight::from_parts(24_128_000, 4326) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1306,6 +1317,8 @@ impl WeightInfo for () { /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) /// Storage: Nfts Collection (r:1 w:0) /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:1 w:0) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) /// Storage: Nfts CollectionConfigOf (r:1 w:0) /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// Storage: Nfts ItemConfigOf (r:1 w:0) @@ -1316,11 +1329,11 @@ impl WeightInfo for () { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn buy_item() -> Weight { // Proof Size summary in bytes: - // Measured: `671` + // Measured: `705` // Estimated: `4326` - // Minimum execution time: 50_809_000 picoseconds. - Weight::from_parts(51_503_000, 4326) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Minimum execution time: 53_291_000 picoseconds. + Weight::from_parts(54_614_000, 4326) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// The range of component `n` is `[0, 10]`. @@ -1328,10 +1341,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_789_000 picoseconds. - Weight::from_parts(5_528_034, 0) - // Standard Error: 14_405 - .saturating_add(Weight::from_parts(3_788_038, 0).saturating_mul(n.into())) + // Minimum execution time: 2_192_000 picoseconds. 
+ Weight::from_parts(4_039_901, 0) + // Standard Error: 10_309 + .saturating_add(Weight::from_parts(3_934_017, 0).saturating_mul(n.into())) } /// Storage: Nfts Item (r:2 w:0) /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) @@ -1339,10 +1352,10 @@ impl WeightInfo for () { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) fn create_swap() -> Weight { // Proof Size summary in bytes: - // Measured: `460` + // Measured: `494` // Estimated: `7662` - // Minimum execution time: 22_884_000 picoseconds. - Weight::from_parts(23_732_000, 7662) + // Minimum execution time: 21_011_000 picoseconds. + Weight::from_parts(22_065_000, 7662) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1352,10 +1365,10 @@ impl WeightInfo for () { /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) fn cancel_swap() -> Weight { // Proof Size summary in bytes: - // Measured: `479` + // Measured: `513` // Estimated: `4326` - // Minimum execution time: 22_686_000 picoseconds. - Weight::from_parts(23_088_000, 4326) + // Minimum execution time: 21_423_000 picoseconds. 
+ Weight::from_parts(21_743_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1365,6 +1378,8 @@ impl WeightInfo for () { /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) /// Storage: Nfts Collection (r:1 w:0) /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: Nfts Attribute (r:2 w:0) + /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) /// Storage: Nfts CollectionConfigOf (r:1 w:0) /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) /// Storage: Nfts ItemConfigOf (r:2 w:0) @@ -1375,11 +1390,11 @@ impl WeightInfo for () { /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn claim_swap() -> Weight { // Proof Size summary in bytes: - // Measured: `800` + // Measured: `834` // Estimated: `7662` - // Minimum execution time: 77_494_000 picoseconds. - Weight::from_parts(78_650_000, 7662) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Minimum execution time: 86_059_000 picoseconds. + Weight::from_parts(88_401_000, 7662) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } /// Storage: Nfts CollectionRoleOf (r:2 w:0) @@ -1403,12 +1418,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 10]`. fn mint_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `595` + // Measured: `629` // Estimated: `6078 + n * (2921 ±0)` - // Minimum execution time: 139_109_000 picoseconds. - Weight::from_parts(144_449_034, 6078) - // Standard Error: 26_869 - .saturating_add(Weight::from_parts(29_961_772, 0).saturating_mul(n.into())) + // Minimum execution time: 146_746_000 picoseconds. 
+ Weight::from_parts(152_885_862, 6078) + // Standard Error: 40_442 + .saturating_add(Weight::from_parts(32_887_800, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1430,12 +1445,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 10]`. fn set_attributes_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `625` + // Measured: `659` // Estimated: `4326 + n * (2921 ±0)` - // Minimum execution time: 78_280_000 picoseconds. - Weight::from_parts(92_826_883, 4326) - // Standard Error: 81_125 - .saturating_add(Weight::from_parts(29_898_245, 0).saturating_mul(n.into())) + // Minimum execution time: 83_960_000 picoseconds. + Weight::from_parts(98_609_885, 4326) + // Standard Error: 85_991 + .saturating_add(Weight::from_parts(32_633_495, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index 50fc4ef68788f..450fee2abcce1 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime 
= { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } [features] default = ["std"] @@ -35,5 +35,12 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "sp-core/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 92865c773d886..0a68f7d7142dc 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -36,6 +36,7 @@ //! * `clear_name` - Remove an account's associated name; the deposit is returned. //! * `kill_name` - Forcibly remove the associated name; the deposit is lost. +#![deny(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] use frame_support::traits::{Currency, OnUnbalanced, ReservableCurrency}; @@ -86,18 +87,37 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// A name was set. - NameSet { who: T::AccountId }, + NameSet { + /// The account for which the name was set. + who: T::AccountId, + }, /// A name was forcibly set. - NameForced { target: T::AccountId }, + NameForced { + /// The account whose name was forcibly set. + target: T::AccountId, + }, /// A name was changed. 
- NameChanged { who: T::AccountId }, + NameChanged { + /// The account for which the name was changed. + who: T::AccountId, + }, /// A name was cleared, and the given balance returned. - NameCleared { who: T::AccountId, deposit: BalanceOf }, + NameCleared { + /// The account for which the name was cleared. + who: T::AccountId, + /// The deposit returned. + deposit: BalanceOf, + }, /// A name was removed and the given balance slashed. - NameKilled { target: T::AccountId, deposit: BalanceOf }, + NameKilled { + /// The account for which the name was removed. + target: T::AccountId, + /// The deposit returned. + deposit: BalanceOf, + }, } - /// Error for the nicks pallet. + /// Error for the Nicks pallet. #[pallet::error] pub enum Error { /// A name is too short. @@ -239,18 +259,14 @@ mod tests { use frame_system::EnsureSignedBy; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + BuildStorage, }; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, Balances: pallet_balances, @@ -264,14 +280,13 @@ mod tests { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -297,7 +312,7 @@ mod tests { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -315,7 +330,7 @@ mod tests { } fn new_test_ext() -> sp_io::TestExternalities 
{ - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10)] } .assimilate_storage(&mut t) .unwrap(); diff --git a/frame/nis/Cargo.toml b/frame/nis/Cargo.toml index c12d4f2c0a1eb..bad1bed8f9042 100644 --- a/frame/nis/Cargo.toml +++ b/frame/nis/Cargo.toml @@ -13,19 +13,19 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../primitives/arithmetic" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-io = { version = "7.0.0", 
path = "../../primitives/io" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } [features] default = ["std"] @@ -39,10 +39,19 @@ std = [ "sp-core/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "sp-io/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/nis/src/lib.rs b/frame/nis/src/lib.rs index c4d0d0d420290..decebbd56762e 100644 --- a/frame/nis/src/lib.rs +++ b/frame/nis/src/lib.rs @@ -162,13 +162,10 @@ pub mod pallet { use frame_support::{ pallet_prelude::*, traits::{ - fungible::{ - self, - hold::{Inspect as FunHoldInspect, Mutate as FunHoldMutate}, - Balanced as FunBalanced, - }, + fungible::{self, hold::Mutate as FunHoldMutate, Balanced as FunBalanced}, nonfungible::{Inspect as NftInspect, Transfer as NftTransfer}, tokens::{ + Balance, Fortitude::Polite, Precision::{BestEffort, Exact}, Preservation::Expendable, @@ -190,13 +187,10 @@ pub mod pallet { <::Currency as FunInspect<::AccountId>>::Balance; type DebtOf = fungible::Debt<::AccountId, ::Currency>; - type ReceiptRecordOf = ReceiptRecord< - ::AccountId, - ::BlockNumber, - BalanceOf, - >; + type ReceiptRecordOf = + ReceiptRecord<::AccountId, BlockNumberFor, BalanceOf>; type IssuanceInfoOf = IssuanceInfo>; - type SummaryRecordOf = SummaryRecord<::BlockNumber, BalanceOf>; + type SummaryRecordOf = SummaryRecord, BalanceOf>; type BidOf = Bid, ::AccountId>; type QueueTotalsTypeOf = BoundedVec<(u32, BalanceOf), ::QueueCount>; @@ -216,25 +210,14 @@ pub mod pallet { type Currency: FunInspect + FunMutate + FunBalanced - + FunHoldInspect - + FunHoldMutate; + + FunHoldMutate; - /// The identifier of the hold reason. 
+ /// Overarching hold reason. + type RuntimeHoldReason: From; - #[pallet::constant] - type HoldReason: Get<>::Reason>; - - /// Just the `Currency::Balance` type; we have this item to allow us to constrain it to - /// `From`. - type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned - + codec::FullCodec - + Copy - + MaybeSerializeDeserialize - + sp_std::fmt::Debug - + Default - + From - + TypeInfo - + MaxEncodedLen; + /// Just the [`Balance`] type; we have this item to allow us to constrain it to + /// [`From`]. + type CurrencyBalance: Balance + From; /// Origin required for auto-funding the deficit. type FundOrigin: EnsureOrigin; @@ -282,7 +265,7 @@ pub mod pallet { /// The base period for the duration queues. This is the common multiple across all /// supported freezing durations that can be bid upon. #[pallet::constant] - type BasePeriod: Get; + type BasePeriod: Get>; /// The minimum amount of funds that may be placed in a bid. Note that this /// does not actually limit the amount which may be represented in a receipt since bids may @@ -303,7 +286,7 @@ pub mod pallet { /// A larger value results in fewer storage hits each block, but a slower period to get to /// the target. #[pallet::constant] - type IntakePeriod: Get; + type IntakePeriod: Get>; /// The maximum amount of bids that can consolidated into receipts in a single intake. A /// larger value here means less of the block available for transactions should there be a @@ -313,7 +296,7 @@ pub mod pallet { /// The maximum proportion which may be thawed and the period over which it is reset. #[pallet::constant] - type ThawThrottle: Get<(Perquintill, Self::BlockNumber)>; + type ThawThrottle: Get<(Perquintill, BlockNumberFor)>; } #[pallet::pallet] @@ -420,7 +403,7 @@ pub mod pallet { /// The identity of the receipt. index: ReceiptIndex, /// The block number at which the receipt may be thawed. - expiry: T::BlockNumber, + expiry: BlockNumberFor, /// The owner of the receipt. 
who: T::AccountId, /// The proportion of the effective total issuance which the receipt represents. @@ -515,7 +498,7 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { - fn on_initialize(n: T::BlockNumber) -> Weight { + fn on_initialize(n: BlockNumberFor) -> Weight { let mut weight_counter = WeightCounter { used: Weight::zero(), limit: T::MaxIntakeWeight::get() }; if T::IntakePeriod::get().is_zero() || (n % T::IntakePeriod::get()).is_zero() { @@ -569,14 +552,14 @@ pub mod pallet { |q| -> Result<(u32, BalanceOf), DispatchError> { let queue_full = q.len() == T::MaxQueueLen::get() as usize; ensure!(!queue_full || q[0].amount < amount, Error::::BidTooLow); - T::Currency::hold(&T::HoldReason::get(), &who, amount)?; + T::Currency::hold(&HoldReason::NftReceipt.into(), &who, amount)?; // queue is let mut bid = Bid { amount, who: who.clone() }; let net = if queue_full { sp_std::mem::swap(&mut q[0], &mut bid); let _ = T::Currency::release( - &T::HoldReason::get(), + &HoldReason::NftReceipt.into(), &bid.who, bid.amount, BestEffort, @@ -637,7 +620,7 @@ pub mod pallet { queue.remove(pos); let new_len = queue.len() as u32; - T::Currency::release(&T::HoldReason::get(), &bid.who, bid.amount, BestEffort)?; + T::Currency::release(&HoldReason::NftReceipt.into(), &bid.who, bid.amount, BestEffort)?; Queues::::insert(duration, queue); QueueTotals::::mutate(|qs| { @@ -729,7 +712,7 @@ pub mod pallet { let dropped = receipt.proportion.is_zero(); if amount > on_hold { - T::Currency::release(&T::HoldReason::get(), &who, on_hold, Exact)?; + T::Currency::release(&HoldReason::NftReceipt.into(), &who, on_hold, Exact)?; let deficit = amount - on_hold; // Try to transfer deficit from pot to receipt owner. summary.receipts_on_hold.saturating_reduce(on_hold); @@ -744,7 +727,7 @@ pub mod pallet { // Transfer excess of `on_hold` to the pot if we have now fully compensated for // the receipt. 
T::Currency::transfer_on_hold( - &T::HoldReason::get(), + &HoldReason::NftReceipt.into(), &who, &our_account, on_hold, @@ -760,7 +743,7 @@ pub mod pallet { )?; summary.receipts_on_hold.saturating_reduce(on_hold); } - T::Currency::release(&T::HoldReason::get(), &who, amount, Exact)?; + T::Currency::release(&HoldReason::NftReceipt.into(), &who, amount, Exact)?; } if dropped { @@ -852,7 +835,7 @@ pub mod pallet { ensure!(owner == who, Error::::NotOwner); // Unreserve and transfer the funds to the pot. - let reason = T::HoldReason::get(); + let reason = HoldReason::NftReceipt.into(); let us = Self::account_id(); T::Currency::transfer_on_hold(&reason, &who, &us, on_hold, Exact, Free, Polite) .map_err(|_| Error::::Unfunded)?; @@ -903,7 +886,7 @@ pub mod pallet { )?; // Transfer the funds from the pot to the owner and reserve - let reason = T::HoldReason::get(); + let reason = HoldReason::NftReceipt.into(); let us = Self::account_id(); T::Currency::transfer_and_hold(&reason, &us, &who, amount, Exact, Expendable, Polite)?; @@ -959,7 +942,7 @@ pub mod pallet { let mut item = Receipts::::get(index).ok_or(TokenError::UnknownAsset)?; let (owner, on_hold) = item.owner.take().ok_or(Error::::AlreadyCommunal)?; - let reason = T::HoldReason::get(); + let reason = HoldReason::NftReceipt.into(); T::Currency::transfer_on_hold(&reason, &owner, dest, on_hold, Exact, OnHold, Polite)?; item.owner = Some((dest.clone(), on_hold)); @@ -1069,7 +1052,7 @@ pub mod pallet { pub(crate) fn process_queue( duration: u32, - now: T::BlockNumber, + now: BlockNumberFor, our_account: &T::AccountId, issuance: &IssuanceInfo>, max_bids: u32, @@ -1113,7 +1096,7 @@ pub mod pallet { pub(crate) fn process_bid( mut bid: BidOf, - expiry: T::BlockNumber, + expiry: BlockNumberFor, _our_account: &T::AccountId, issuance: &IssuanceInfo>, remaining: &mut BalanceOf, diff --git a/frame/nis/src/mock.rs b/frame/nis/src/mock.rs index 0ca6690936818..76fdf5f3f0693 100644 --- a/frame/nis/src/mock.rs +++ b/frame/nis/src/mock.rs 
@@ -19,7 +19,6 @@ use crate::{self as pallet_nis, Perquintill, WithMaximumOf}; -use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ ord_parameter_types, parameter_types, traits::{ @@ -30,24 +29,19 @@ use frame_support::{ PalletId, }; use pallet_balances::{Instance1, Instance2}; -use scale_info::TypeInfo; use sp_core::{ConstU128, H256}; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; pub type Balance = u64; // Configure a mock runtime to test the pallet. frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, Balances: pallet_balances::, @@ -62,13 +56,12 @@ impl frame_system::Config for Test { type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); @@ -95,17 +88,10 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = HoldIdentifier; + type RuntimeHoldReason = RuntimeHoldReason; type MaxHolds = ConstU32<1>; } -#[derive( - Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, Debug, TypeInfo, -)] -pub enum HoldIdentifier { - Nis, -} - impl pallet_balances::Config for Test { type Balance = u128; type DustRemoval = (); @@ -122,7 +108,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type 
MaxHolds = (); } @@ -133,7 +119,6 @@ parameter_types! { pub const MinReceipt: Perquintill = Perquintill::from_percent(1); pub const ThawThrottle: (Perquintill, u64) = (Perquintill::from_percent(25), 5); pub static MaxIntakeWeight: Weight = Weight::from_parts(2_000_000_000_000, 0); - pub const HoldReason: HoldIdentifier = HoldIdentifier::Nis; } ord_parameter_types! { @@ -161,13 +146,13 @@ impl pallet_nis::Config for Test { type MaxIntakeWeight = MaxIntakeWeight; type MinReceipt = MinReceipt; type ThawThrottle = ThawThrottle; - type HoldReason = HoldReason; + type RuntimeHoldReason = RuntimeHoldReason; } // This function basically just builds a genesis storage key/value store according to // our desired mockup. pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 100), (2, 100), (3, 100), (4, 100)], } @@ -180,7 +165,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { // our desired mockup, but without any balances. #[cfg(feature = "runtime-benchmarks")] pub fn new_test_ext_empty() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default().build_storage::().unwrap().into() + frame_system::GenesisConfig::::default().build_storage().unwrap().into() } pub fn run_to_block(n: u64) { diff --git a/frame/nis/src/weights.rs b/frame/nis/src/weights.rs index 4f92da874b5a2..cba2f0049055b 100644 --- a/frame/nis/src/weights.rs +++ b/frame/nis/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_nis //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_nis +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_nis. pub trait WeightInfo { @@ -67,51 +71,51 @@ impl WeightInfo for SubstrateWeight { /// Storage: Nis Queues (r:1 w:1) /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) /// Storage: Nis QueueTotals (r:1 w:1) /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) /// The range of component `l` is `[0, 999]`. fn place_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6175 + l * (48 ±0)` + // Measured: `6176 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 49_132_000 picoseconds. - Weight::from_parts(55_373_619, 51487) - // Standard Error: 198 - .saturating_add(Weight::from_parts(44_421, 0).saturating_mul(l.into())) + // Minimum execution time: 49_410_000 picoseconds. 
+ Weight::from_parts(57_832_282, 51487) + // Standard Error: 288 + .saturating_add(Weight::from_parts(51_621, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Nis Queues (r:1 w:1) /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) /// Storage: Nis QueueTotals (r:1 w:1) /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) fn place_bid_max() -> Weight { // Proof Size summary in bytes: - // Measured: `54177` + // Measured: `54178` // Estimated: `51487` - // Minimum execution time: 111_471_000 picoseconds. - Weight::from_parts(112_287_000, 51487) + // Minimum execution time: 119_696_000 picoseconds. + Weight::from_parts(121_838_000, 51487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Nis Queues (r:1 w:1) /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) /// Storage: Nis QueueTotals (r:1 w:1) /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6175 + l * (48 ±0)` + // Measured: `6176 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 51_134_000 picoseconds. 
- Weight::from_parts(52_353_883, 51487) - // Standard Error: 161 - .saturating_add(Weight::from_parts(62_171, 0).saturating_mul(l.into())) + // Minimum execution time: 50_843_000 picoseconds. + Weight::from_parts(54_237_365, 51487) + // Standard Error: 243 + .saturating_add(Weight::from_parts(67_732, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -123,15 +127,15 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `191` // Estimated: `3593` - // Minimum execution time: 41_421_000 picoseconds. - Weight::from_parts(41_762_000, 3593) + // Minimum execution time: 40_752_000 picoseconds. + Weight::from_parts(41_899_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Nis Receipts (r:1 w:1) /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Nis Summary (r:1 w:1) @@ -139,13 +143,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: Assets Asset (r:1 w:1) /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) fn communify() -> Weight { // Proof Size summary in bytes: - // Measured: `667` + // Measured: `668` // Estimated: `3675` - // Minimum execution time: 74_179_000 
picoseconds. - Weight::from_parts(74_795_000, 3675) + // Minimum execution time: 79_779_000 picoseconds. + Weight::from_parts(82_478_000, 3675) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -158,15 +162,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: Assets Asset (r:1 w:1) /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) fn privatize() -> Weight { // Proof Size summary in bytes: - // Measured: `828` + // Measured: `829` // Estimated: `3675` - // Minimum execution time: 85_252_000 picoseconds. - Weight::from_parts(85_949_000, 3675) + // Minimum execution time: 99_588_000 picoseconds. 
+ Weight::from_parts(102_340_000, 3675) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -174,18 +178,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) /// Storage: Nis Summary (r:1 w:1) /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) + /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) fn thaw_private() -> Weight { // Proof Size summary in bytes: - // Measured: `456` + // Measured: `354` // Estimated: `3593` - // Minimum execution time: 82_100_000 picoseconds. - Weight::from_parts(82_563_000, 3593) + // Minimum execution time: 53_094_000 picoseconds. 
+ Weight::from_parts(54_543_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Nis Receipts (r:1 w:1) /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) @@ -194,15 +198,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: Assets Asset (r:1 w:1) /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn thaw_communal() -> Weight { // Proof Size summary in bytes: // Measured: `773` // Estimated: `3675` - // Minimum execution time: 86_498_000 picoseconds. - Weight::from_parts(87_175_000, 3675) + // Minimum execution time: 107_248_000 picoseconds. + Weight::from_parts(109_923_000, 3675) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -216,8 +220,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6624` // Estimated: `7487` - // Minimum execution time: 22_507_000 picoseconds. - Weight::from_parts(22_788_000, 7487) + // Minimum execution time: 27_169_000 picoseconds. + Weight::from_parts(29_201_000, 7487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -227,8 +231,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `51487` - // Minimum execution time: 4_692_000 picoseconds. - Weight::from_parts(4_862_000, 51487) + // Minimum execution time: 4_540_000 picoseconds. 
+ Weight::from_parts(4_699_000, 51487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -238,8 +242,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_031_000 picoseconds. - Weight::from_parts(8_183_000, 0) + // Minimum execution time: 7_085_000 picoseconds. + Weight::from_parts(7_336_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -249,51 +253,51 @@ impl WeightInfo for () { /// Storage: Nis Queues (r:1 w:1) /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) /// Storage: Nis QueueTotals (r:1 w:1) /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) /// The range of component `l` is `[0, 999]`. fn place_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6175 + l * (48 ±0)` + // Measured: `6176 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 49_132_000 picoseconds. - Weight::from_parts(55_373_619, 51487) - // Standard Error: 198 - .saturating_add(Weight::from_parts(44_421, 0).saturating_mul(l.into())) + // Minimum execution time: 49_410_000 picoseconds. 
+ Weight::from_parts(57_832_282, 51487) + // Standard Error: 288 + .saturating_add(Weight::from_parts(51_621, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Nis Queues (r:1 w:1) /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) /// Storage: Nis QueueTotals (r:1 w:1) /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) fn place_bid_max() -> Weight { // Proof Size summary in bytes: - // Measured: `54177` + // Measured: `54178` // Estimated: `51487` - // Minimum execution time: 111_471_000 picoseconds. - Weight::from_parts(112_287_000, 51487) + // Minimum execution time: 119_696_000 picoseconds. + Weight::from_parts(121_838_000, 51487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Nis Queues (r:1 w:1) /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) /// Storage: Nis QueueTotals (r:1 w:1) /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6175 + l * (48 ±0)` + // Measured: `6176 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 51_134_000 picoseconds. 
- Weight::from_parts(52_353_883, 51487) - // Standard Error: 161 - .saturating_add(Weight::from_parts(62_171, 0).saturating_mul(l.into())) + // Minimum execution time: 50_843_000 picoseconds. + Weight::from_parts(54_237_365, 51487) + // Standard Error: 243 + .saturating_add(Weight::from_parts(67_732, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -305,15 +309,15 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `191` // Estimated: `3593` - // Minimum execution time: 41_421_000 picoseconds. - Weight::from_parts(41_762_000, 3593) + // Minimum execution time: 40_752_000 picoseconds. + Weight::from_parts(41_899_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Nis Receipts (r:1 w:1) /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Nis Summary (r:1 w:1) @@ -321,13 +325,13 @@ impl WeightInfo for () { /// Storage: Assets Asset (r:1 w:1) /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) fn communify() -> Weight { // Proof Size summary in bytes: - // Measured: `667` + // Measured: `668` // Estimated: `3675` - // Minimum execution time: 74_179_000 picoseconds. 
- Weight::from_parts(74_795_000, 3675) + // Minimum execution time: 79_779_000 picoseconds. + Weight::from_parts(82_478_000, 3675) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -340,15 +344,15 @@ impl WeightInfo for () { /// Storage: Assets Asset (r:1 w:1) /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) fn privatize() -> Weight { // Proof Size summary in bytes: - // Measured: `828` + // Measured: `829` // Estimated: `3675` - // Minimum execution time: 85_252_000 picoseconds. - Weight::from_parts(85_949_000, 3675) + // Minimum execution time: 99_588_000 picoseconds. 
+ Weight::from_parts(102_340_000, 3675) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -356,18 +360,18 @@ impl WeightInfo for () { /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) /// Storage: Nis Summary (r:1 w:1) /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) + /// Storage: System Account (r:1 w:0) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(66), added: 2541, mode: MaxEncodedLen) + /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) fn thaw_private() -> Weight { // Proof Size summary in bytes: - // Measured: `456` + // Measured: `354` // Estimated: `3593` - // Minimum execution time: 82_100_000 picoseconds. - Weight::from_parts(82_563_000, 3593) + // Minimum execution time: 53_094_000 picoseconds. 
+ Weight::from_parts(54_543_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Nis Receipts (r:1 w:1) /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) @@ -376,15 +380,15 @@ impl WeightInfo for () { /// Storage: Assets Asset (r:1 w:1) /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(102), added: 2577, mode: MaxEncodedLen) + /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn thaw_communal() -> Weight { // Proof Size summary in bytes: // Measured: `773` // Estimated: `3675` - // Minimum execution time: 86_498_000 picoseconds. - Weight::from_parts(87_175_000, 3675) + // Minimum execution time: 107_248_000 picoseconds. + Weight::from_parts(109_923_000, 3675) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -398,8 +402,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6624` // Estimated: `7487` - // Minimum execution time: 22_507_000 picoseconds. - Weight::from_parts(22_788_000, 7487) + // Minimum execution time: 27_169_000 picoseconds. + Weight::from_parts(29_201_000, 7487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -409,8 +413,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `42` // Estimated: `51487` - // Minimum execution time: 4_692_000 picoseconds. - Weight::from_parts(4_862_000, 51487) + // Minimum execution time: 4_540_000 picoseconds. 
+ Weight::from_parts(4_699_000, 51487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -420,8 +424,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_031_000 picoseconds. - Weight::from_parts(8_183_000, 0) + // Minimum execution time: 7_085_000 picoseconds. + Weight::from_parts(7_336_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/frame/node-authorization/Cargo.toml b/frame/node-authorization/Cargo.toml index ed3f59a9a1e24..24155841271e5 100644 --- a/frame/node-authorization/Cargo.toml +++ b/frame/node-authorization/Cargo.toml @@ -12,15 +12,15 @@ description = "FRAME pallet for node authorization" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = 
"../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [features] default = ["std"] @@ -35,4 +35,8 @@ std = [ "sp-runtime/std", "sp-std/std", ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index 6ccc142431e61..8a823d29f2355 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -115,7 +115,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { Pallet::::initialize_nodes(&self.nodes); } @@ -169,7 +169,7 @@ pub mod pallet { impl Hooks> for Pallet { /// Set reserved node every block. It may not be enabled depends on the offchain /// worker settings when starting the node. - fn offchain_worker(now: T::BlockNumber) { + fn offchain_worker(now: frame_system::pallet_prelude::BlockNumberFor) { let network_state = sp_io::offchain::network_state(); match network_state { Err(_) => log::error!( diff --git a/frame/node-authorization/src/mock.rs b/frame/node-authorization/src/mock.rs index b7c5957e15dee..84e3336b3bd68 100644 --- a/frame/node-authorization/src/mock.rs +++ b/frame/node-authorization/src/mock.rs @@ -22,25 +22,21 @@ use crate as pallet_node_authorization; use frame_support::{ ord_parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild}, + traits::{ConstU32, ConstU64}, }; use frame_system::EnsureSignedBy; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + 
pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, NodeAuthorization: pallet_node_authorization::{ Pallet, Call, Storage, Config, Event, }, @@ -53,14 +49,13 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -97,7 +92,7 @@ pub fn test_node(id: u8) -> PeerId { } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_node_authorization::GenesisConfig:: { nodes: vec![(test_node(10), 10), (test_node(20), 20), (test_node(30), 30)], } diff --git a/frame/nomination-pools/Cargo.toml b/frame/nomination-pools/Cargo.toml index c92d9b5124697..f3b9a025ecf7c 100644 --- a/frame/nomination-pools/Cargo.toml +++ b/frame/nomination-pools/Cargo.toml @@ -13,26 +13,26 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } # FRAME frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = 
"5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } log = { version = "0.4.0", default-features = false } # Optional: use for testing and/or fuzzing pallet-balances = { version = "4.0.0-dev", path = "../balances", optional = true } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing", optional = true } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing", optional = true } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } [features] default = ["std"] @@ -48,10 +48,19 @@ std = [ "sp-staking/std", "sp-core/std", "log/std", + "pallet-balances?/std", + "sp-tracing?/std" ] runtime-benchmarks = [ "sp-staking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances?/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" ] try-runtime = [ - "frame-support/try-runtime" + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances?/try-runtime", + "sp-runtime/try-runtime" ] diff --git a/frame/nomination-pools/benchmarking/Cargo.toml b/frame/nomination-pools/benchmarking/Cargo.toml index 4f757cafa2fd1..38b79d2dabc4c 100644 --- 
a/frame/nomination-pools/benchmarking/Cargo.toml +++ b/frame/nomination-pools/benchmarking/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } # FRAME @@ -27,17 +27,17 @@ pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../. pallet-nomination-pools = { version = "1.0.0", default-features = false, path = "../" } # Substrate Primitives -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime-interface" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-runtime-interface = { version = "17.0.0", default-features = false, path = "../../../primitives/runtime-interface" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../balances" } pallet-timestamp = { version = "4.0.0-dev", path = "../../timestamp" } pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../../staking/reward-curve" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../../primitives/io" } [features] 
default = ["std"] @@ -54,6 +54,10 @@ std = [ "sp-runtime-interface/std", "sp-staking/std", "sp-std/std", + "pallet-balances/std", + "pallet-timestamp/std", + "sp-core/std", + "sp-io/std" ] runtime-benchmarks = [ @@ -66,4 +70,6 @@ runtime-benchmarks = [ "pallet-staking/runtime-benchmarks", "pallet-nomination-pools/runtime-benchmarks", "pallet-bags-list/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks" ] diff --git a/frame/nomination-pools/benchmarking/src/lib.rs b/frame/nomination-pools/benchmarking/src/lib.rs index 137b9e9af63e3..e757f66e725a0 100644 --- a/frame/nomination-pools/benchmarking/src/lib.rs +++ b/frame/nomination-pools/benchmarking/src/lib.rs @@ -35,6 +35,7 @@ use pallet_nomination_pools::{ MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, Pallet as Pools, PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, }; +use pallet_staking::MaxNominationsOf; use sp_runtime::{ traits::{Bounded, StaticLookup, Zero}, Perbill, @@ -564,7 +565,7 @@ frame_benchmarking::benchmarks! { } nominate { - let n in 1 .. T::MaxNominations::get(); + let n in 1 .. MaxNominationsOf::::get(); // Create a pool let min_create_bond = Pools::::depositor_min_bond() * 2u32.into(); @@ -609,7 +610,7 @@ frame_benchmarking::benchmarks! { let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); BondedPools::::mutate(&1, |maybe_pool| { // Force the pool into an invalid state - maybe_pool.as_mut().map(|mut pool| pool.points = min_create_bond * 10u32.into()); + maybe_pool.as_mut().map(|pool| pool.points = min_create_bond * 10u32.into()); }); let caller = account("caller", 0, USER_SEED); @@ -679,7 +680,7 @@ frame_benchmarking::benchmarks! { let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); // Nominate with the pool. 
- let validators: Vec<_> = (0..T::MaxNominations::get()) + let validators: Vec<_> = (0..MaxNominationsOf::::get()) .map(|i| account("stash", USER_SEED, i)) .collect(); diff --git a/frame/nomination-pools/benchmarking/src/mock.rs b/frame/nomination-pools/benchmarking/src/mock.rs index cffb712ea2ae5..2d75df63b518a 100644 --- a/frame/nomination-pools/benchmarking/src/mock.rs +++ b/frame/nomination-pools/benchmarking/src/mock.rs @@ -20,11 +20,11 @@ use frame_election_provider_support::VoteWeight; use frame_support::{pallet_prelude::*, parameter_types, traits::ConstU64, PalletId}; use sp_runtime::{ traits::{Convert, IdentityLookup}, - FixedU128, Perbill, + BuildStorage, FixedU128, Perbill, }; type AccountId = u128; -type AccountIndex = u32; +type Nonce = u32; type BlockNumber = u64; type Balance = u128; @@ -34,14 +34,13 @@ impl frame_system::Config for Runtime { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = AccountIndex; - type BlockNumber = BlockNumber; + type Nonce = Nonce; type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type Header = sp_runtime::testing::Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); @@ -77,7 +76,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -95,11 +94,10 @@ parameter_types! 
{ pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; } impl pallet_staking::Config for Runtime { - type MaxNominations = ConstU32<16>; type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type CurrencyToVote = (); type RewardRemainder = (); type RuntimeEvent = RuntimeEvent; type Slash = (); @@ -118,9 +116,10 @@ impl pallet_staking::Config for Runtime { type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; type HistoryDepth = ConstU32<84>; - type OnStakerSlash = Pools; + type EventListeners = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } @@ -175,12 +174,9 @@ impl pallet_nomination_pools::Config for Runtime { impl crate::Config for Runtime {} type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + frame_support::construct_runtime!( - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { System: frame_system::{Pallet, Call, Event}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, @@ -192,7 +188,7 @@ frame_support::construct_runtime!( ); pub fn new_test_ext() -> sp_io::TestExternalities { - let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); let _ = pallet_nomination_pools::GenesisConfig:: { min_join_bond: 2, min_create_bond: 2, diff --git a/frame/nomination-pools/runtime-api/Cargo.toml b/frame/nomination-pools/runtime-api/Cargo.toml index 5e290232a115e..f92a303e9bbe1 100644 --- 
a/frame/nomination-pools/runtime-api/Cargo.toml +++ b/frame/nomination-pools/runtime-api/Cargo.toml @@ -13,9 +13,9 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } pallet-nomination-pools = { version = "1.0.0", default-features = false, path = "../" } [features] diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index cc68f54bd55cf..c4bebc5a1d030 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -326,7 +326,7 @@ //! //! This section assumes that the slash computation is executed by //! `pallet_staking::StakingLedger::slash`, which passes the information to this pallet via -//! [`sp_staking::OnStakerSlash::on_slash`]. +//! [`sp_staking::OnStakingUpdate::on_slash`]. //! //! Unbonding pools need to be slashed to ensure all nominators whom where in the bonded pool while //! it was backing a validator that equivocated are punished. Without these measures a member could @@ -341,10 +341,6 @@ //! in addition to the unbonding pools. For maintenance simplicity these are not implemented. //! Related: //! -//! **Relevant methods:** -//! -//! * [`Pallet::on_slash`] -//! //! ### Limitations //! //! 
* PoolMembers cannot vote with their staked funds because they are transferred into the pools @@ -366,6 +362,7 @@ use frame_support::{ }, DefaultNoBound, PalletError, }; +use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; use sp_core::U256; use sp_runtime::{ @@ -375,9 +372,12 @@ use sp_runtime::{ }, FixedPointNumber, Perbill, }; -use sp_staking::{EraIndex, OnStakerSlash, StakingInterface}; +use sp_staking::{EraIndex, StakingInterface}; use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, ops::Div, vec::Vec}; +#[cfg(any(feature = "try-runtime", feature = "fuzzing", test, debug_assertions))] +use sp_runtime::TryRuntimeError; + /// The log target of this pallet. pub const LOG_TARGET: &str = "runtime::nomination-pools"; @@ -673,10 +673,10 @@ pub struct Commission { pub max: Option, /// Optional configuration around how often commission can be updated, and when the last /// commission update took place. - pub change_rate: Option>, + pub change_rate: Option>>, /// The block from where throttling should be checked from. This value will be updated on all /// commission updates and when setting an initial `change_rate`. - pub throttle_from: Option, + pub throttle_from: Option>, } impl Commission { @@ -746,6 +746,10 @@ impl Commission { None => None, Some((commission, payee)) => { ensure!(!self.throttling(commission), Error::::CommissionChangeThrottled); + ensure!( + commission <= &GlobalMaxCommission::::get().unwrap_or(Bounded::max_value()), + Error::::CommissionExceedsGlobalMaximum + ); ensure!( self.max.map_or(true, |m| commission <= &m), Error::::CommissionExceedsMaximum @@ -770,6 +774,10 @@ impl Commission { /// updated to the new maximum. This will also register a `throttle_from` update. /// A `PoolCommissionUpdated` event is triggered if `current.0` is updated. 
fn try_update_max(&mut self, pool_id: PoolId, new_max: Perbill) -> DispatchResult { + ensure!( + new_max <= GlobalMaxCommission::::get().unwrap_or(Bounded::max_value()), + Error::::CommissionExceedsGlobalMaximum + ); if let Some(old) = self.max.as_mut() { if new_max > *old { return Err(Error::::MaxCommissionRestricted.into()) @@ -810,7 +818,7 @@ impl Commission { /// throttling can be checked from this block. fn try_update_change_rate( &mut self, - change_rate: CommissionChangeRate, + change_rate: CommissionChangeRate>, ) -> DispatchResult { ensure!(!&self.less_restrictive(&change_rate), Error::::CommissionChangeRateNotAllowed); @@ -830,7 +838,7 @@ impl Commission { /// /// No change rate will always be less restrictive than some change rate, so where no /// `change_rate` is currently set, `false` is returned. - fn less_restrictive(&self, new: &CommissionChangeRate) -> bool { + fn less_restrictive(&self, new: &CommissionChangeRate>) -> bool { self.change_rate .as_ref() .map(|c| new.max_increase > c.max_increase || new.min_delay < c.min_delay) @@ -1673,7 +1681,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { MinJoinBond::::put(self.min_join_bond); MinCreateBond::::put(self.min_create_bond); @@ -1758,7 +1766,7 @@ pub mod pallet { /// A pool's commission `change_rate` has been changed. PoolCommissionChangeRateUpdated { pool_id: PoolId, - change_rate: CommissionChangeRate, + change_rate: CommissionChangeRate>, }, /// Pool commission has been claimed. PoolCommissionClaimed { pool_id: PoolId, commission: BalanceOf }, @@ -1821,6 +1829,8 @@ pub mod pallet { MaxCommissionRestricted, /// The supplied commission exceeds the max allowed commission. CommissionExceedsMaximum, + /// The supplied commission exceeds global maximum commission. + CommissionExceedsGlobalMaximum, /// Not enough blocks have surpassed since the last commission update. 
CommissionChangeThrottled, /// The submitted changes to commission change rate are not allowed. @@ -2594,7 +2604,7 @@ pub mod pallet { pub fn set_commission_change_rate( origin: OriginFor, pool_id: PoolId, - change_rate: CommissionChangeRate, + change_rate: CommissionChangeRate>, ) -> DispatchResult { let who = ensure_signed(origin)?; let mut bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; @@ -2626,7 +2636,7 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { #[cfg(feature = "try-runtime")] - fn try_state(_n: BlockNumberFor) -> Result<(), &'static str> { + fn try_state(_n: BlockNumberFor) -> Result<(), TryRuntimeError> { Self::do_try_state(u8::MAX) } @@ -3055,7 +3065,7 @@ impl Pallet { /// multiple `level`s, where the higher the level, the more checks we performs. So, /// `try_state(255)` is the strongest sanity check, and `0` performs no checks. #[cfg(any(feature = "try-runtime", feature = "fuzzing", test, debug_assertions))] - pub fn do_try_state(level: u8) -> Result<(), &'static str> { + pub fn do_try_state(level: u8) -> Result<(), TryRuntimeError> { if level.is_zero() { return Ok(()) } @@ -3063,12 +3073,24 @@ impl Pallet { // result in the same set of keys, in the same order. let bonded_pools = BondedPools::::iter_keys().collect::>(); let reward_pools = RewardPools::::iter_keys().collect::>(); - assert_eq!(bonded_pools, reward_pools); + ensure!( + bonded_pools == reward_pools, + "`BondedPools` and `RewardPools` must all have the EXACT SAME key-set." + ); - assert!(Metadata::::iter_keys().all(|k| bonded_pools.contains(&k))); - assert!(SubPoolsStorage::::iter_keys().all(|k| bonded_pools.contains(&k))); + ensure!( + SubPoolsStorage::::iter_keys().all(|k| bonded_pools.contains(&k)), + "`SubPoolsStorage` must be a subset of the above superset." + ); + ensure!( + Metadata::::iter_keys().all(|k| bonded_pools.contains(&k)), + "`Metadata` keys must be a subset of the above superset." 
+ ); - assert!(MaxPools::::get().map_or(true, |max| bonded_pools.len() <= (max as usize))); + ensure!( + MaxPools::::get().map_or(true, |max| bonded_pools.len() <= (max as usize)), + Error::::MaxPools + ); for id in reward_pools { let account = Self::create_reward_account(id); @@ -3088,9 +3110,9 @@ impl Pallet { let mut pools_members = BTreeMap::::new(); let mut pools_members_pending_rewards = BTreeMap::>::new(); let mut all_members = 0u32; - PoolMembers::::iter().for_each(|(_, d)| { + PoolMembers::::iter().try_for_each(|(_, d)| -> Result<(), TryRuntimeError> { let bonded_pool = BondedPools::::get(d.pool_id).unwrap(); - assert!(!d.total_points().is_zero(), "no member should have zero points: {d:?}"); + ensure!(!d.total_points().is_zero(), "No member should have zero points"); *pools_members.entry(d.pool_id).or_default() += 1; all_members += 1; @@ -3103,42 +3125,56 @@ impl Pallet { let pending_rewards = d.pending_rewards(current_rc).unwrap(); *pools_members_pending_rewards.entry(d.pool_id).or_default() += pending_rewards; } // else this pool has been heavily slashed and cannot have any rewards anymore. - }); - RewardPools::::iter_keys().for_each(|id| { + Ok(()) + })?; + + RewardPools::::iter_keys().try_for_each(|id| -> Result<(), TryRuntimeError> { // the sum of the pending rewards must be less than the leftover balance. Since the // reward math rounds down, we might accumulate some dust here. 
- log!( - trace, - "pool {:?}, sum pending rewards = {:?}, remaining balance = {:?}", - id, - pools_members_pending_rewards.get(&id), - RewardPool::::current_balance(id) + let pending_rewards_lt_leftover_bal = RewardPool::::current_balance(id) >= + pools_members_pending_rewards.get(&id).copied().unwrap_or_default(); + if !pending_rewards_lt_leftover_bal { + log::warn!( + "pool {:?}, sum pending rewards = {:?}, remaining balance = {:?}", + id, + pools_members_pending_rewards.get(&id), + RewardPool::::current_balance(id) + ); + } + ensure!( + pending_rewards_lt_leftover_bal, + "The sum of the pending rewards must be less than the leftover balance." ); - assert!( - RewardPool::::current_balance(id) >= - pools_members_pending_rewards.get(&id).copied().unwrap_or_default() - ) - }); + Ok(()) + })?; - BondedPools::::iter().for_each(|(id, inner)| { + BondedPools::::iter().try_for_each(|(id, inner)| -> Result<(), TryRuntimeError> { let bonded_pool = BondedPool { id, inner }; - assert_eq!( - pools_members.get(&id).copied().unwrap_or_default(), - bonded_pool.member_counter + ensure!( + pools_members.get(&id).copied().unwrap_or_default() == + bonded_pool.member_counter, + "Each `BondedPool.member_counter` must be equal to the actual count of members of this pool" + ); + ensure!( + MaxPoolMembersPerPool::::get() + .map_or(true, |max| bonded_pool.member_counter <= max), + Error::::MaxPoolMembers ); - assert!(MaxPoolMembersPerPool::::get() - .map_or(true, |max| bonded_pool.member_counter <= max)); let depositor = PoolMembers::::get(&bonded_pool.roles.depositor).unwrap(); - assert!( + ensure!( bonded_pool.is_destroying_and_only_depositor(depositor.active_points()) || depositor.active_points() >= MinCreateBond::::get(), "depositor must always have MinCreateBond stake in the pool, except for when the \ pool is being destroyed and the depositor is the last member", ); - }); - assert!(MaxPoolMembers::::get().map_or(true, |max| all_members <= max)); + Ok(()) + })?; + ensure!( + 
MaxPoolMembers::::get().map_or(true, |max| all_members <= max), + Error::::MaxPoolMembers + ); if level <= 1 { return Ok(()) @@ -3226,7 +3262,7 @@ impl Pallet { } } -impl OnStakerSlash> for Pallet { +impl sp_staking::OnStakingUpdate> for Pallet { fn on_slash( pool_account: &T::AccountId, // Bonded balance is always read directly from staking, therefore we don't need to update diff --git a/frame/nomination-pools/src/migration.rs b/frame/nomination-pools/src/migration.rs index 45d6424119900..2ae4cd1b86857 100644 --- a/frame/nomination-pools/src/migration.rs +++ b/frame/nomination-pools/src/migration.rs @@ -20,6 +20,9 @@ use crate::log; use frame_support::traits::OnRuntimeUpgrade; use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + pub mod v1 { use super::*; @@ -100,9 +103,12 @@ pub mod v1 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(_: Vec) -> Result<(), &'static str> { + fn post_upgrade(_: Vec) -> Result<(), TryRuntimeError> { // new version must be set. - assert_eq!(Pallet::::on_chain_storage_version(), 1); + ensure!( + Pallet::::on_chain_storage_version() == 1, + "The onchain version must be updated after the migration." + ); Pallet::::try_state(frame_system::Pallet::::block_number())?; Ok(()) } @@ -352,38 +358,47 @@ pub mod v2 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { // all reward accounts must have more than ED. - RewardPools::::iter().for_each(|(id, _)| { - assert!( + RewardPools::::iter().try_for_each(|(id, _)| -> Result<(), TryRuntimeError> { + ensure!( T::Currency::free_balance(&Pallet::::create_reward_account(id)) >= - T::Currency::minimum_balance() - ) - }); + T::Currency::minimum_balance(), + "Reward accounts must have greater balance than ED." 
+ ); + Ok(()) + })?; Ok(Vec::new()) } #[cfg(feature = "try-runtime")] - fn post_upgrade(_: Vec) -> Result<(), &'static str> { + fn post_upgrade(_: Vec) -> Result<(), TryRuntimeError> { // new version must be set. - assert_eq!(Pallet::::on_chain_storage_version(), 2); + ensure!( + Pallet::::on_chain_storage_version() == 2, + "The onchain version must be updated after the migration." + ); // no reward or bonded pool has been skipped. - assert_eq!(RewardPools::::iter().count() as u32, RewardPools::::count()); - assert_eq!(BondedPools::::iter().count() as u32, BondedPools::::count()); + ensure!( + RewardPools::::iter().count() as u32 == RewardPools::::count(), + "The count of reward pools must remain the same after the migration." + ); + ensure!( + BondedPools::::iter().count() as u32 == BondedPools::::count(), + "The count of bonded pools must remain the same after the migration." + ); // all reward pools must have exactly ED in them. This means no reward can be claimed, // and that setting reward counters all over the board to zero will work henceforth. 
- RewardPools::::iter().for_each(|(id, _)| { - assert_eq!( - RewardPool::::current_balance(id), - Zero::zero(), - "reward pool({}) balance is {:?}", - id, - RewardPool::::current_balance(id) + RewardPools::::iter().try_for_each(|(id, _)| -> Result<(), TryRuntimeError> { + ensure!( + RewardPool::::current_balance(id) == Zero::zero(), + "Reward pool balance must be zero.", ); - }); + Ok(()) + })?; log!(info, "post upgrade hook for MigrateToV2 executed."); Ok(()) @@ -401,14 +416,14 @@ pub mod v3 { let current = Pallet::::current_storage_version(); let onchain = Pallet::::on_chain_storage_version(); - log!( - info, - "Running migration with current storage version {:?} / onchain {:?}", - current, - onchain - ); + if onchain == 2 { + log!( + info, + "Running migration with current storage version {:?} / onchain {:?}", + current, + onchain + ); - if current > onchain { let mut metadata_iterated = 0u64; let mut metadata_removed = 0u64; Metadata::::iter_keys() @@ -422,7 +437,7 @@ pub mod v3 { metadata_removed += 1; Metadata::::remove(&id); }); - current.put::>(); + StorageVersion::new(3).put::>(); // metadata iterated + bonded pools read + a storage version read let total_reads = metadata_iterated * 2 + 1; // metadata removed + a storage version write @@ -435,21 +450,20 @@ pub mod v3 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - ensure!( - Pallet::::current_storage_version() > Pallet::::on_chain_storage_version(), - "the on_chain version is equal or more than the current one" - ); + fn pre_upgrade() -> Result, TryRuntimeError> { Ok(Vec::new()) } #[cfg(feature = "try-runtime")] - fn post_upgrade(_: Vec) -> Result<(), &'static str> { + fn post_upgrade(_: Vec) -> Result<(), TryRuntimeError> { ensure!( Metadata::::iter_keys().all(|id| BondedPools::::contains_key(&id)), "not all of the stale metadata has been removed" ); - ensure!(Pallet::::on_chain_storage_version() == 3, "wrong storage version"); + ensure!( + 
Pallet::::on_chain_storage_version() >= 3, + "nomination-pools::migration::v3: wrong storage version" + ); Ok(()) } } @@ -535,29 +549,36 @@ pub mod v4 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - ensure!( - Pallet::::current_storage_version() > Pallet::::on_chain_storage_version(), - "the on_chain version is equal or more than the current one" - ); + fn pre_upgrade() -> Result, TryRuntimeError> { Ok(Vec::new()) } #[cfg(feature = "try-runtime")] - fn post_upgrade(_: Vec) -> Result<(), &'static str> { + fn post_upgrade(_: Vec) -> Result<(), TryRuntimeError> { // ensure all BondedPools items now contain an `inner.commission: Commission` field. ensure!( - BondedPools::::iter().all(|(_, inner)| inner.commission.current.is_none() && - inner.commission.max.is_none() && - inner.commission.change_rate.is_none() && - inner.commission.throttle_from.is_none()), - "a commission value has been incorrectly set" + BondedPools::::iter().all(|(_, inner)| + // Check current + (inner.commission.current.is_none() || + inner.commission.current.is_some()) && + // Check max + (inner.commission.max.is_none() || inner.commission.max.is_some()) && + // Check change_rate + (inner.commission.change_rate.is_none() || + inner.commission.change_rate.is_some()) && + // Check throttle_from + (inner.commission.throttle_from.is_none() || + inner.commission.throttle_from.is_some())), + "a commission value has not been set correctly" ); ensure!( GlobalMaxCommission::::get() == Some(U::get()), "global maximum commission error" ); - ensure!(Pallet::::on_chain_storage_version() == 4, "wrong storage version"); + ensure!( + Pallet::::on_chain_storage_version() >= 4, + "nomination-pools::migration::v4: wrong storage version" + ); Ok(()) } } @@ -620,12 +641,7 @@ pub mod v5 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - ensure!( - Pallet::::current_storage_version() > Pallet::::on_chain_storage_version(), - "the on_chain version is 
equal or more than the current one" - ); - + fn pre_upgrade() -> Result, TryRuntimeError> { let rpool_keys = RewardPools::::iter_keys().count(); let rpool_values = RewardPools::::iter_values().count(); if rpool_keys != rpool_values { @@ -654,7 +670,7 @@ pub mod v5 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(data: Vec) -> Result<(), &'static str> { + fn post_upgrade(data: Vec) -> Result<(), TryRuntimeError> { let old_rpool_values: u64 = Decode::decode(&mut &data[..]).unwrap(); let rpool_keys = RewardPools::::iter_keys().count() as u64; let rpool_values = RewardPools::::iter_values().count() as u64; @@ -675,13 +691,16 @@ pub mod v5 { // `total_commission_claimed` field. ensure!( RewardPools::::iter().all(|(_, reward_pool)| reward_pool - .total_commission_pending - .is_zero() && reward_pool - .total_commission_claimed - .is_zero()), + .total_commission_pending >= + Zero::zero() && reward_pool + .total_commission_claimed >= + Zero::zero()), "a commission value has been incorrectly set" ); - ensure!(Pallet::::on_chain_storage_version() == 5, "wrong storage version"); + ensure!( + Pallet::::on_chain_storage_version() >= 5, + "nomination-pools::migration::v5: wrong storage version" + ); // These should not have been touched - just in case. 
ensure!( diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs index 3ab9be516fdb9..7d0d729a40d41 100644 --- a/frame/nomination-pools/src/mock.rs +++ b/frame/nomination-pools/src/mock.rs @@ -2,7 +2,7 @@ use super::*; use crate::{self as pools}; use frame_support::{assert_ok, parameter_types, PalletId}; use frame_system::RawOrigin; -use sp_runtime::FixedU128; +use sp_runtime::{BuildStorage, FixedU128}; use sp_staking::Stake; pub type BlockNumber = u64; @@ -47,6 +47,7 @@ impl StakingMock { impl sp_staking::StakingInterface for StakingMock { type Balance = Balance; type AccountId = AccountId; + type CurrencyToVote = (); fn minimum_nominator_bond() -> Self::Balance { StakingMinBond::get() @@ -167,14 +168,13 @@ impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = BlockNumber; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = sp_runtime::testing::Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type DbWeight = (); @@ -206,7 +206,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -245,15 +245,11 @@ impl pools::Config for Runtime { type MaxPointsToBalance = frame_support::traits::ConstU8<10>; } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub struct Runtime { - System: frame_system::{Pallet, Call, Storage, Event, Config}, + 
System: frame_system::{Pallet, Call, Storage, Event, Config}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Pools: pools::{Pallet, Call, Storage, Event}, } @@ -323,7 +319,7 @@ impl ExtBuilder { pub fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); let mut storage = - frame_system::GenesisConfig::default().build_storage::().unwrap(); + frame_system::GenesisConfig::::default().build_storage().unwrap(); let _ = crate::GenesisConfig:: { min_join_bond: MinJoinBondConfig::get(), diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index 4cb255e23b4b8..ac8fa5c4dbc0c 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -4269,10 +4269,7 @@ mod create { assert!(!BondedPools::::contains_key(2)); assert!(!RewardPools::::contains_key(2)); assert!(!PoolMembers::::contains_key(11)); - assert_err!( - StakingMock::active_stake(&next_pool_stash), - DispatchError::Other("balance not found") - ); + assert_err!(StakingMock::active_stake(&next_pool_stash), "balance not found"); Balances::make_free_balance_be(&11, StakingMock::minimum_nominator_bond() + ed); assert_ok!(Pools::create( @@ -5918,6 +5915,16 @@ mod commission { Error::::DoesNotHavePermission ); + // Cannot set max commission above GlobalMaxCommission + assert_noop!( + Pools::set_commission_max( + RuntimeOrigin::signed(900), + 1, + Perbill::from_percent(100) + ), + Error::::CommissionExceedsGlobalMaximum + ); + // Set a max commission commission pool 1 to 80% assert_ok!(Pools::set_commission_max( RuntimeOrigin::signed(900), @@ -6594,7 +6601,7 @@ mod commission { } #[test] - fn global_max_prevents_100_percent_commission_payout() { + fn global_max_caps_max_commission_payout() { ExtBuilder::default().build_and_execute(|| { // Note: GlobalMaxCommission is set at 90%. 
@@ -6604,24 +6611,31 @@ mod commission { // top up the commission payee account to existential deposit let _ = Balances::deposit_creating(&2, 5); - // Set a commission pool 1 to 100%, with a payee set to `2` - assert_ok!(Pools::set_commission( - RuntimeOrigin::signed(900), - bonded_pool.id, - Some((Perbill::from_percent(100), 2)), - )); + // Set a commission pool 1 to 100% fails. + assert_noop!( + Pools::set_commission( + RuntimeOrigin::signed(900), + bonded_pool.id, + Some((Perbill::from_percent(100), 2)), + ), + Error::::CommissionExceedsGlobalMaximum + ); assert_eq!( pool_events_since_last_call(), vec![ Event::Created { depositor: 10, pool_id: 1 }, Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, - Event::PoolCommissionUpdated { - pool_id: 1, - current: Some((Perbill::from_percent(100), 2)) - } ] ); + // Set pool commission to 90% and then set global max commission to 80%. + assert_ok!(Pools::set_commission( + RuntimeOrigin::signed(900), + bonded_pool.id, + Some((Perbill::from_percent(90), 2)), + )); + GlobalMaxCommission::::set(Some(Perbill::from_percent(80))); + // The pool earns 10 points deposit_rewards(10); @@ -6633,11 +6647,17 @@ mod commission { &mut reward_pool )); - // Confirm the commission was only 9 points out of 10 points, and the payout was 1 out - // of 10 points, reflecting the 90% global max commission. + // Confirm the commission was only 8 points out of 10 points, and the payout was 2 out + // of 10 points, reflecting the 80% global max commission. 
assert_eq!( pool_events_since_last_call(), - vec![Event::PaidOut { member: 10, pool_id: 1, payout: 1 },] + vec![ + Event::PoolCommissionUpdated { + pool_id: 1, + current: Some((Perbill::from_percent(90), 2)) + }, + Event::PaidOut { member: 10, pool_id: 1, payout: 2 }, + ] ); }) } diff --git a/frame/nomination-pools/src/weights.rs b/frame/nomination-pools/src/weights.rs index 0ac50d2f745ed..eb33c9adbbf96 100644 --- a/frame/nomination-pools/src/weights.rs +++ b/frame/nomination-pools/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_nomination_pools //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_nomination_pools +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_nomination_pools. pub trait WeightInfo { @@ -107,8 +111,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3300` // Estimated: `8877` - // Minimum execution time: 186_523_000 picoseconds. - Weight::from_parts(187_817_000, 8877) + // Minimum execution time: 200_966_000 picoseconds. 
+ Weight::from_parts(208_322_000, 8877) .saturating_add(T::DbWeight::get().reads(19_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) } @@ -138,8 +142,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3310` // Estimated: `8877` - // Minimum execution time: 183_120_000 picoseconds. - Weight::from_parts(184_749_000, 8877) + // Minimum execution time: 197_865_000 picoseconds. + Weight::from_parts(203_085_000, 8877) .saturating_add(T::DbWeight::get().reads(16_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) } @@ -171,8 +175,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3375` // Estimated: `8877` - // Minimum execution time: 218_799_000 picoseconds. - Weight::from_parts(220_139_000, 8877) + // Minimum execution time: 235_496_000 picoseconds. + Weight::from_parts(242_104_000, 8877) .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -192,8 +196,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1171` // Estimated: `3702` - // Minimum execution time: 79_493_000 picoseconds. - Weight::from_parts(80_266_000, 3702) + // Minimum execution time: 81_813_000 picoseconds. + Weight::from_parts(83_277_000, 3702) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -233,8 +237,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3586` // Estimated: `27847` - // Minimum execution time: 168_592_000 picoseconds. - Weight::from_parts(170_130_000, 27847) + // Minimum execution time: 183_935_000 picoseconds. 
+ Weight::from_parts(186_920_000, 27847) .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -255,10 +259,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1687` // Estimated: `4764` - // Minimum execution time: 63_254_000 picoseconds. - Weight::from_parts(64_154_755, 4764) - // Standard Error: 344 - .saturating_add(Weight::from_parts(8_798, 0).saturating_mul(s.into())) + // Minimum execution time: 64_962_000 picoseconds. + Weight::from_parts(67_936_216, 4764) + // Standard Error: 1_780 + .saturating_add(Weight::from_parts(36_110, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -289,10 +293,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2115` // Estimated: `27847` - // Minimum execution time: 131_339_000 picoseconds. - Weight::from_parts(133_590_267, 27847) - // Standard Error: 1_058 - .saturating_add(Weight::from_parts(9_932, 0).saturating_mul(s.into())) + // Minimum execution time: 136_073_000 picoseconds. + Weight::from_parts(141_448_439, 27847) + // Standard Error: 2_472 + .saturating_add(Weight::from_parts(75_893, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } @@ -345,8 +349,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2470` // Estimated: `27847` - // Minimum execution time: 219_026_000 picoseconds. - Weight::from_parts(223_230_356, 27847) + // Minimum execution time: 230_871_000 picoseconds. + Weight::from_parts(239_533_976, 27847) .saturating_add(T::DbWeight::get().reads(21_u64)) .saturating_add(T::DbWeight::get().writes(18_u64)) } @@ -398,8 +402,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1289` // Estimated: `6196` - // Minimum execution time: 189_710_000 picoseconds. 
- Weight::from_parts(190_251_000, 6196) + // Minimum execution time: 194_272_000 picoseconds. + Weight::from_parts(197_933_000, 6196) .saturating_add(T::DbWeight::get().reads(22_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } @@ -432,10 +436,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1849` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 70_044_000 picoseconds. - Weight::from_parts(69_307_256, 4556) - // Standard Error: 6_066 - .saturating_add(Weight::from_parts(1_517_942, 0).saturating_mul(n.into())) + // Minimum execution time: 70_256_000 picoseconds. + Weight::from_parts(71_045_351, 4556) + // Standard Error: 9_898 + .saturating_add(Weight::from_parts(1_592_597, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -451,8 +455,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1438` // Estimated: `4556` - // Minimum execution time: 36_610_000 picoseconds. - Weight::from_parts(37_212_000, 4556) + // Minimum execution time: 36_233_000 picoseconds. + Weight::from_parts(37_114_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -467,10 +471,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `531` // Estimated: `3735` - // Minimum execution time: 15_334_000 picoseconds. - Weight::from_parts(15_753_107, 3735) - // Standard Error: 62 - .saturating_add(Weight::from_parts(1_365, 0).saturating_mul(n.into())) + // Minimum execution time: 14_494_000 picoseconds. 
+ Weight::from_parts(15_445_658, 3735) + // Standard Error: 211 + .saturating_add(Weight::from_parts(1_523, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -490,8 +494,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_156_000 picoseconds. - Weight::from_parts(7_596_000, 0) + // Minimum execution time: 6_776_000 picoseconds. + Weight::from_parts(7_033_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: NominationPools BondedPools (r:1 w:1) @@ -500,8 +504,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `531` // Estimated: `3685` - // Minimum execution time: 20_342_000 picoseconds. - Weight::from_parts(20_699_000, 3685) + // Minimum execution time: 19_586_000 picoseconds. + Weight::from_parts(20_287_000, 3685) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -527,8 +531,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2012` // Estimated: `4556` - // Minimum execution time: 66_608_000 picoseconds. - Weight::from_parts(67_416_000, 4556) + // Minimum execution time: 68_086_000 picoseconds. + Weight::from_parts(70_784_000, 4556) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -544,8 +548,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `770` // Estimated: `3685` - // Minimum execution time: 34_619_000 picoseconds. - Weight::from_parts(34_894_000, 3685) + // Minimum execution time: 33_353_000 picoseconds. 
+ Weight::from_parts(34_519_000, 3685) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -555,8 +559,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `571` // Estimated: `3685` - // Minimum execution time: 19_676_000 picoseconds. - Weight::from_parts(19_958_000, 3685) + // Minimum execution time: 19_020_000 picoseconds. + Weight::from_parts(19_630_000, 3685) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -566,8 +570,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `531` // Estimated: `3685` - // Minimum execution time: 20_404_000 picoseconds. - Weight::from_parts(20_859_000, 3685) + // Minimum execution time: 19_693_000 picoseconds. + Weight::from_parts(20_114_000, 3685) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -579,8 +583,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 15_401_000 picoseconds. - Weight::from_parts(15_706_000, 3702) + // Minimum execution time: 14_810_000 picoseconds. + Weight::from_parts(15_526_000, 3702) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -596,8 +600,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `968` // Estimated: `3685` - // Minimum execution time: 66_775_000 picoseconds. - Weight::from_parts(67_242_000, 3685) + // Minimum execution time: 66_400_000 picoseconds. + Weight::from_parts(67_707_000, 3685) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -639,8 +643,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3300` // Estimated: `8877` - // Minimum execution time: 186_523_000 picoseconds. 
- Weight::from_parts(187_817_000, 8877) + // Minimum execution time: 200_966_000 picoseconds. + Weight::from_parts(208_322_000, 8877) .saturating_add(RocksDbWeight::get().reads(19_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) } @@ -670,8 +674,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3310` // Estimated: `8877` - // Minimum execution time: 183_120_000 picoseconds. - Weight::from_parts(184_749_000, 8877) + // Minimum execution time: 197_865_000 picoseconds. + Weight::from_parts(203_085_000, 8877) .saturating_add(RocksDbWeight::get().reads(16_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) } @@ -703,8 +707,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3375` // Estimated: `8877` - // Minimum execution time: 218_799_000 picoseconds. - Weight::from_parts(220_139_000, 8877) + // Minimum execution time: 235_496_000 picoseconds. + Weight::from_parts(242_104_000, 8877) .saturating_add(RocksDbWeight::get().reads(17_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -724,8 +728,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1171` // Estimated: `3702` - // Minimum execution time: 79_493_000 picoseconds. - Weight::from_parts(80_266_000, 3702) + // Minimum execution time: 81_813_000 picoseconds. + Weight::from_parts(83_277_000, 3702) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -765,8 +769,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3586` // Estimated: `27847` - // Minimum execution time: 168_592_000 picoseconds. - Weight::from_parts(170_130_000, 27847) + // Minimum execution time: 183_935_000 picoseconds. 
+ Weight::from_parts(186_920_000, 27847) .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -787,10 +791,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1687` // Estimated: `4764` - // Minimum execution time: 63_254_000 picoseconds. - Weight::from_parts(64_154_755, 4764) - // Standard Error: 344 - .saturating_add(Weight::from_parts(8_798, 0).saturating_mul(s.into())) + // Minimum execution time: 64_962_000 picoseconds. + Weight::from_parts(67_936_216, 4764) + // Standard Error: 1_780 + .saturating_add(Weight::from_parts(36_110, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -821,10 +825,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2115` // Estimated: `27847` - // Minimum execution time: 131_339_000 picoseconds. - Weight::from_parts(133_590_267, 27847) - // Standard Error: 1_058 - .saturating_add(Weight::from_parts(9_932, 0).saturating_mul(s.into())) + // Minimum execution time: 136_073_000 picoseconds. + Weight::from_parts(141_448_439, 27847) + // Standard Error: 2_472 + .saturating_add(Weight::from_parts(75_893, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } @@ -877,8 +881,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2470` // Estimated: `27847` - // Minimum execution time: 219_026_000 picoseconds. - Weight::from_parts(223_230_356, 27847) + // Minimum execution time: 230_871_000 picoseconds. + Weight::from_parts(239_533_976, 27847) .saturating_add(RocksDbWeight::get().reads(21_u64)) .saturating_add(RocksDbWeight::get().writes(18_u64)) } @@ -930,8 +934,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1289` // Estimated: `6196` - // Minimum execution time: 189_710_000 picoseconds. 
- Weight::from_parts(190_251_000, 6196) + // Minimum execution time: 194_272_000 picoseconds. + Weight::from_parts(197_933_000, 6196) .saturating_add(RocksDbWeight::get().reads(22_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } @@ -964,10 +968,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1849` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 70_044_000 picoseconds. - Weight::from_parts(69_307_256, 4556) - // Standard Error: 6_066 - .saturating_add(Weight::from_parts(1_517_942, 0).saturating_mul(n.into())) + // Minimum execution time: 70_256_000 picoseconds. + Weight::from_parts(71_045_351, 4556) + // Standard Error: 9_898 + .saturating_add(Weight::from_parts(1_592_597, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -983,8 +987,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1438` // Estimated: `4556` - // Minimum execution time: 36_610_000 picoseconds. - Weight::from_parts(37_212_000, 4556) + // Minimum execution time: 36_233_000 picoseconds. + Weight::from_parts(37_114_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -999,10 +1003,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `531` // Estimated: `3735` - // Minimum execution time: 15_334_000 picoseconds. - Weight::from_parts(15_753_107, 3735) - // Standard Error: 62 - .saturating_add(Weight::from_parts(1_365, 0).saturating_mul(n.into())) + // Minimum execution time: 14_494_000 picoseconds. 
+ Weight::from_parts(15_445_658, 3735) + // Standard Error: 211 + .saturating_add(Weight::from_parts(1_523, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1022,8 +1026,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_156_000 picoseconds. - Weight::from_parts(7_596_000, 0) + // Minimum execution time: 6_776_000 picoseconds. + Weight::from_parts(7_033_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: NominationPools BondedPools (r:1 w:1) @@ -1032,8 +1036,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `531` // Estimated: `3685` - // Minimum execution time: 20_342_000 picoseconds. - Weight::from_parts(20_699_000, 3685) + // Minimum execution time: 19_586_000 picoseconds. + Weight::from_parts(20_287_000, 3685) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1059,8 +1063,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2012` // Estimated: `4556` - // Minimum execution time: 66_608_000 picoseconds. - Weight::from_parts(67_416_000, 4556) + // Minimum execution time: 68_086_000 picoseconds. + Weight::from_parts(70_784_000, 4556) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1076,8 +1080,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `770` // Estimated: `3685` - // Minimum execution time: 34_619_000 picoseconds. - Weight::from_parts(34_894_000, 3685) + // Minimum execution time: 33_353_000 picoseconds. 
+ Weight::from_parts(34_519_000, 3685) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1087,8 +1091,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `571` // Estimated: `3685` - // Minimum execution time: 19_676_000 picoseconds. - Weight::from_parts(19_958_000, 3685) + // Minimum execution time: 19_020_000 picoseconds. + Weight::from_parts(19_630_000, 3685) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1098,8 +1102,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `531` // Estimated: `3685` - // Minimum execution time: 20_404_000 picoseconds. - Weight::from_parts(20_859_000, 3685) + // Minimum execution time: 19_693_000 picoseconds. + Weight::from_parts(20_114_000, 3685) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1111,8 +1115,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 15_401_000 picoseconds. - Weight::from_parts(15_706_000, 3702) + // Minimum execution time: 14_810_000 picoseconds. + Weight::from_parts(15_526_000, 3702) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1128,8 +1132,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `968` // Estimated: `3685` - // Minimum execution time: 66_775_000 picoseconds. - Weight::from_parts(67_242_000, 3685) + // Minimum execution time: 66_400_000 picoseconds. 
+ Weight::from_parts(67_707_000, 3685) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/frame/nomination-pools/test-staking/Cargo.toml b/frame/nomination-pools/test-staking/Cargo.toml index fbe5feca0febe..8ff7895a328dc 100644 --- a/frame/nomination-pools/test-staking/Cargo.toml +++ b/frame/nomination-pools/test-staking/Cargo.toml @@ -13,14 +13,14 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } scale-info = { version = "2.0.1", features = ["derive"] } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } -sp-io = { version = "7.0.0", path = "../../../primitives/io" } -sp-std = { version = "5.0.0", path = "../../../primitives/std" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +sp-io = { version = "23.0.0", path = "../../../primitives/io" } +sp-std = { version = "8.0.0", path = "../../../primitives/std" } sp-staking = { version = "4.0.0-dev", path = "../../../primitives/staking" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } frame-system = { version = "4.0.0-dev", path = "../../system" } frame-support = { version = "4.0.0-dev", path = "../../support" } @@ -33,5 +33,5 @@ pallet-bags-list = { version = "4.0.0-dev", path = "../../bags-list" } pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../../staking/reward-curve" } pallet-nomination-pools = { version = "1.0.0-dev", path = ".." 
} -sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } log = { version = "0.4.0" } diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs index 9726f5e6dad27..02c253e62c018 100644 --- a/frame/nomination-pools/test-staking/src/mock.rs +++ b/frame/nomination-pools/test-staking/src/mock.rs @@ -25,11 +25,11 @@ use frame_support::{ }; use sp_runtime::{ traits::{Convert, IdentityLookup}, - FixedU128, Perbill, + BuildStorage, FixedU128, Perbill, }; type AccountId = u128; -type AccountIndex = u32; +type Nonce = u32; type BlockNumber = u64; type Balance = u128; @@ -44,14 +44,13 @@ impl frame_system::Config for Runtime { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = AccountIndex; - type BlockNumber = BlockNumber; + type Nonce = Nonce; type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type Header = sp_runtime::testing::Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); @@ -88,7 +87,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -109,11 +108,10 @@ parameter_types! 
{ } impl pallet_staking::Config for Runtime { - type MaxNominations = ConstU32<16>; type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type CurrencyToVote = (); type RewardRemainder = (); type RuntimeEvent = RuntimeEvent; type Slash = (); @@ -132,9 +130,10 @@ impl pallet_staking::Config for Runtime { type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; type HistoryDepth = ConstU32<84>; - type OnStakerSlash = Pools; + type EventListeners = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } @@ -187,13 +186,9 @@ impl pallet_nomination_pools::Config for Runtime { } type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; frame_support::construct_runtime!( - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { System: frame_system::{Pallet, Call, Event}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, @@ -206,7 +201,7 @@ frame_support::construct_runtime!( pub fn new_test_ext() -> sp_io::TestExternalities { sp_tracing::try_init_simple(); - let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); let _ = pallet_nomination_pools::GenesisConfig:: { min_join_bond: 2, min_create_bond: 2, diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml index 6ebe870a78c9b..f2542b125d86f 100644 --- a/frame/offences/Cargo.toml +++ b/frame/offences/Cargo.toml @@ -13,20 +13,20 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec 
= { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", optional = true } +serde = { version = "1.0.163", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../balances" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } [features] default = ["std"] @@ -41,6 +41,19 @@ std = [ "sp-runtime/std", "sp-staking/std", "sp-std/std", + "sp-core/std", + "sp-io/std" +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -runtime-benchmarks = [] -try-runtime 
= ["frame-support/try-runtime"] diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index 23377f7883dae..3483ad743a462 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../benchmarking" } frame-election-provider-support = { version = "4.0.0-dev", default-features = false, path = "../../election-provider-support" } @@ -26,16 +26,16 @@ pallet-im-online = { version = "4.0.0-dev", default-features = false, path = ".. pallet-offences = { version = "4.0.0-dev", default-features = false, path = "../../offences" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../session" } pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../staking" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } log = { version = "0.4.17", default-features = false } [dev-dependencies] pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../../staking/reward-curve" } pallet-timestamp = { version = "4.0.0-dev", path = "../../timestamp" } -sp-core = { version = "7.0.0", path = 
"../../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../../primitives/io" } [features] default = ["std"] @@ -57,8 +57,23 @@ std = [ "sp-staking/std", "sp-std/std", "log/std", + "pallet-timestamp/std", + "sp-core/std", + "sp-io/std" ] runtime-benchmarks = [ "pallet-staking/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-election-provider-support/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-babe/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-grandpa/runtime-benchmarks", + "pallet-im-online/runtime-benchmarks", + "pallet-offences/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks" ] diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index e7fc39657a190..c190927b84bf1 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -50,8 +50,8 @@ use pallet_session::{ #[cfg(test)] use pallet_staking::Event as StakingEvent; use pallet_staking::{ - Config as StakingConfig, Exposure, IndividualExposure, Pallet as Staking, RewardDestination, - ValidatorPrefs, + Config as StakingConfig, Exposure, IndividualExposure, MaxNominationsOf, Pallet as Staking, + RewardDestination, ValidatorPrefs, }; const SEED: u32 = 0; @@ -283,7 +283,7 @@ benchmarks! { let r in 1 .. MAX_REPORTERS; // we skip 1 offender, because in such case there is no slashing let o in 2 .. MAX_OFFENDERS; - let n in 0 .. MAX_NOMINATORS.min(::MaxNominations::get()); + let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); // Make r reporters let mut reporters = vec![]; @@ -399,7 +399,7 @@ benchmarks! { } report_offence_grandpa { - let n in 0 .. 
MAX_NOMINATORS.min(::MaxNominations::get()); + let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); // for grandpa equivocation reports the number of reporters // and offenders is always 1 @@ -429,14 +429,14 @@ benchmarks! { + 1 // offence + 3 // reporter (reward + endowment) + 1 // offenders reported - + 2 // offenders slashed + + 3 // offenders slashed + 1 // offenders chilled - + 2 * n // nominators slashed + + 3 * n // nominators slashed ); } report_offence_babe { - let n in 0 .. MAX_NOMINATORS.min(::MaxNominations::get()); + let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); // for babe equivocation reports the number of reporters // and offenders is always 1 @@ -466,9 +466,9 @@ benchmarks! { + 1 // offence + 3 // reporter (reward + endowment) + 1 // offenders reported - + 2 // offenders slashed + + 3 // offenders slashed + 1 // offenders chilled - + 2 * n // nominators slashed + + 3 * n // nominators slashed ); } diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 668d88e0bf3d0..88f418dd3e2e8 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -20,7 +20,10 @@ #![cfg(test)] use super::*; -use frame_election_provider_support::{onchain, SequentialPhragmen}; +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, SequentialPhragmen, +}; use frame_support::{ parameter_types, traits::{ConstU32, ConstU64}, @@ -30,11 +33,11 @@ use pallet_session::historical as pallet_session_historical; use sp_runtime::{ testing::{Header, UintAuthorityId}, traits::IdentityLookup, + BuildStorage, }; type AccountId = u64; -type AccountIndex = u32; -type BlockNumber = u64; +type Nonce = u32; type Balance = u64; impl frame_system::Config for Test { @@ -43,14 +46,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = AccountIndex; - type 
BlockNumber = BlockNumber; + type Nonce = Nonce; type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type Header = sp_runtime::testing::Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); @@ -76,7 +78,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -142,6 +144,7 @@ pallet_staking_reward_curve::build! { } parameter_types! { pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; + pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); } pub type Extrinsic = sp_runtime::testing::TestXt; @@ -153,16 +156,14 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = (); type MaxWinners = ConstU32<100>; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; + type Bounds = ElectionsBounds; } impl pallet_staking::Config for Test { - type MaxNominations = ConstU32<16>; type Currency = Balances; type CurrencyBalance = ::Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type CurrencyToVote = (); type RewardRemainder = (); type RuntimeEvent = RuntimeEvent; type Slash = (); @@ -180,9 +181,10 @@ impl pallet_staking::Config for Test { type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; type HistoryDepth = ConstU32<84>; - type OnStakerSlash = (); + type EventListeners = (); type BenchmarkingConfig = 
pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } @@ -197,7 +199,6 @@ impl pallet_im_online::Config for Test { type WeightInfo = (); type MaxKeys = ConstU32<10_000>; type MaxPeerInHeartbeats = ConstU32<10_000>; - type MaxPeerDataEncodingSize = ConstU32<1_000>; } impl pallet_offences::Config for Test { @@ -220,10 +221,7 @@ pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub enum Test { System: system::{Pallet, Call, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, @@ -236,6 +234,6 @@ frame_support::construct_runtime!( ); pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); sp_io::TestExternalities::new(t) } diff --git a/frame/offences/src/migration.rs b/frame/offences/src/migration.rs index 07bd68407d378..3c0d243a55d98 100644 --- a/frame/offences/src/migration.rs +++ b/frame/offences/src/migration.rs @@ -29,6 +29,8 @@ use sp_std::vec::Vec; #[cfg(feature = "try-runtime")] use frame_support::ensure; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; mod v0 { use super::*; @@ -51,10 +53,7 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - let onchain = Pallet::::on_chain_storage_version(); - ensure!(onchain < 1, "pallet_offences::MigrateToV1 migration can be deleted"); - + fn pre_upgrade() -> Result, TryRuntimeError> { log::info!( target: LOG_TARGET, "Number of reports to refund and delete: {}", @@ -65,23 +64,20 @@ pub mod v1 { } fn on_runtime_upgrade() -> Weight { - let onchain = Pallet::::on_chain_storage_version(); - - if 
onchain > 0 { + if Pallet::::on_chain_storage_version() > 0 { log::info!(target: LOG_TARGET, "pallet_offences::MigrateToV1 should be removed"); return T::DbWeight::get().reads(1) } let keys_removed = v0::ReportsByKindIndex::::clear(u32::MAX, None).unique as u64; - let weight = T::DbWeight::get().reads_writes(keys_removed, keys_removed); - StorageVersion::new(1).put::>(); - weight + // + 1 for reading/writing the new storage version + T::DbWeight::get().reads_writes(keys_removed + 1, keys_removed + 1) } #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { let onchain = Pallet::::on_chain_storage_version(); ensure!(onchain == 1, "pallet_offences::MigrateToV1 needs to be run"); ensure!( @@ -145,7 +141,7 @@ mod test { ext.execute_with(|| { assert_eq!( v1::MigrateToV1::::on_runtime_upgrade(), - ::DbWeight::get().reads_writes(1, 1), + ::DbWeight::get().reads_writes(2, 2), ); assert!(>::iter_values().count() == 0); diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index 17480be76c1d8..990ceae5ac01e 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -29,9 +29,8 @@ use frame_support::{ }; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, - Perbill, + BuildStorage, Perbill, }; use sp_staking::{ offence::{self, DisableStrategy, Kind, OffenceDetails}, @@ -66,16 +65,12 @@ pub fn with_on_offence_fractions) -> R>(f: F) -> OnOffencePerbill::mutate(|fractions| f(fractions)) } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub struct Runtime { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Offences: 
offences::{Pallet, Storage, Event}, } ); @@ -86,14 +81,13 @@ impl frame_system::Config for Runtime { type BlockLength = (); type DbWeight = RocksDbWeight; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -114,7 +108,7 @@ impl Config for Runtime { } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext diff --git a/frame/paged-list/Cargo.toml b/frame/paged-list/Cargo.toml new file mode 100644 index 0000000000000..3d7010d331b2e --- /dev/null +++ b/frame/paged-list/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "pallet-paged-list" +version = "0.1.0" +description = "FRAME pallet that provides a paged list data structure." 
+authors = ["Parity Technologies "] +homepage = "https://substrate.io" +edition = "2021" +license = "Apache-2.0" +repository = "https://github.com/paritytech/substrate" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive"] } +docify = "0.2.1" +scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } + +frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } + +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", path = "../../primitives/core", default-features = false } +sp-io = { version = "23.0.0", path = "../../primitives/io", default-features = false } + +[features] +default = ["std"] + +std = ["codec/std", "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "scale-info/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std"] + +runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks"] + +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/paged-list/fuzzer/Cargo.toml b/frame/paged-list/fuzzer/Cargo.toml new file mode 100644 index 0000000000000..9402ca8a48477 --- /dev/null +++ b/frame/paged-list/fuzzer/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "pallet-paged-list-fuzzer" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = 
"https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Fuzz storage types of pallet-paged-list" +publish = false + +[[bin]] +name = "pallet-paged-list" +path = "src/paged_list.rs" + +[dependencies] +arbitrary = "1.3.0" +honggfuzz = "0.5.49" + +frame-support = { version = "4.0.0-dev", default-features = false, features = [ "std" ], path = "../../support" } +sp-io = { path = "../../../primitives/io", default-features = false, features = [ "std" ] } +pallet-paged-list = { path = "../", default-features = false, features = [ "std" ] } diff --git a/frame/paged-list/fuzzer/src/paged_list.rs b/frame/paged-list/fuzzer/src/paged_list.rs new file mode 100644 index 0000000000000..43b797eee6bfb --- /dev/null +++ b/frame/paged-list/fuzzer/src/paged_list.rs @@ -0,0 +1,103 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Running +//! Running this fuzzer can be done with `cargo hfuzz run pallet-paged-list`. `honggfuzz` CLI +//! options can be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. +//! +//! # Debugging a panic +//! Once a panic is found, it can be debugged with +//! `cargo hfuzz run-debug pallet-paged-list hfuzz_workspace/pallet-paged-list/*.fuzz`. +//! +//! # More information +//! More information about `honggfuzz` can be found +//! [here](https://docs.rs/honggfuzz/). 
+ +use arbitrary::Arbitrary; +use honggfuzz::fuzz; + +use frame_support::{storage::StorageList, StorageNoopGuard}; +use pallet_paged_list::mock::{PagedList as List, *}; +use sp_io::TestExternalities; +type Meta = MetaOf; + +fn main() { + loop { + fuzz!(|data: (Vec, u8)| { + drain_append_work(data.0, data.1); + }); + } +} + +/// Appends and drains random number of elements in random order and checks storage invariants. +/// +/// It also changes the maximal number of elements per page dynamically, hence the `page_size`. +fn drain_append_work(ops: Vec, page_size: u8) { + if page_size == 0 { + return + } + + TestExternalities::default().execute_with(|| { + ValuesPerNewPage::set(&page_size.into()); + let _g = StorageNoopGuard::default(); + let mut total: i64 = 0; + + for op in ops.into_iter() { + total += op.exec(); + + assert!(total >= 0); + assert_eq!(List::iter().count(), total as usize); + + // We have the assumption that the queue removes the metadata when empty. + if total == 0 { + assert_eq!(List::drain().count(), 0); + assert_eq!(Meta::from_storage().unwrap_or_default(), Default::default()); + } + } + + assert_eq!(List::drain().count(), total as usize); + // `StorageNoopGuard` checks that there is no storage leaked. + }); +} + +enum Op { + Append(Vec), + Drain(u8), +} + +impl Arbitrary<'_> for Op { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + if u.arbitrary::()? { + Ok(Op::Append(Vec::::arbitrary(u)?)) + } else { + Ok(Op::Drain(u.arbitrary::()?)) + } + } +} + +impl Op { + pub fn exec(self) -> i64 { + match self { + Op::Append(v) => { + let l = v.len(); + List::append_many(v); + l as i64 + }, + Op::Drain(v) => -(List::drain().take(v as usize).count() as i64), + } + } +} diff --git a/frame/paged-list/src/lib.rs b/frame/paged-list/src/lib.rs new file mode 100644 index 0000000000000..ddeed174f34bb --- /dev/null +++ b/frame/paged-list/src/lib.rs @@ -0,0 +1,136 @@ +// This file is part of Substrate. 
+ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! > Made with *Substrate*, for *DotSama*. +//! +//! [![github]](https://github.com/paritytech/substrate/frame/fast-unstake) - +//! [![polkadot]](https://polkadot.network) +//! +//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! +//! # Paged List Pallet +//! +//! A thin wrapper pallet around a [`paged_list::StoragePagedList`]. It provides an API for a single +//! paginated list. It can be instantiated multiple times to provide multiple lists. +//! +//! ## Overview +//! +//! The pallet is quite unique since it does not expose any `Call`s, `Error`s or `Event`s. All +//! interaction goes through the implemented [`StorageList`][frame_support::storage::StorageList] +//! trait. +//! +//! A fuzzer for testing is provided in crate `pallet-paged-list-fuzzer`. +//! +//! ## Examples +//! +//! 1. **Appending** some data to the list can happen either by [`Pallet::append_one`]: +#![doc = docify::embed!("src/tests.rs", append_one_works)] +//! 2. or by [`Pallet::append_many`]. This should always be preferred to repeated calls to +//! [`Pallet::append_one`]: +#![doc = docify::embed!("src/tests.rs", append_many_works)] +//! 3. If you want to append many values (ie. 
in a loop), then best use the [`Pallet::appender`]: +#![doc = docify::embed!("src/tests.rs", appender_works)] +//! 4. **Iterating** over the list can be done with [`Pallet::iter`]. It uses the standard +//! `Iterator` trait: +#![doc = docify::embed!("src/tests.rs", iter_works)] +//! 5. **Draining** elements happens through the [`Pallet::drain`] iterator. Note that even +//! *peeking* a value will already remove it. +#![doc = docify::embed!("src/tests.rs", drain_works)] +//! +//! ## Pallet API +//! +//! None. Only things to consider is the [`Config`] traits. +//! +//! ## Low Level / Implementation Details +//! +//! Implementation details are documented in [`paged_list::StoragePagedList`]. +//! All storage entries are prefixed with a unique prefix that is generated by [`ListPrefix`]. + +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +pub mod mock; +mod paged_list; +mod tests; + +use codec::FullCodec; +use frame_support::{ + pallet_prelude::StorageList, + traits::{PalletInfoAccess, StorageInstance}, +}; +pub use paged_list::StoragePagedList; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The value type that can be stored in the list. + type Value: FullCodec; + + /// The number of values that can be put into newly created pages. + /// + /// Note that this does not retroactively affect already created pages. This value can be + /// changed at any time without requiring a runtime migration. + #[pallet::constant] + type ValuesPerNewPage: Get; + } + + /// A storage paged list akin to what the FRAME macros would generate. + // Note that FRAME does natively support paged lists in storage. + pub type List = StoragePagedList< + ListPrefix, + >::Value, + >::ValuesPerNewPage, + >; +} + +// This exposes the list functionality to other pallets. 
+impl, I: 'static> StorageList for Pallet { + type Iterator = as StorageList>::Iterator; + type Appender = as StorageList>::Appender; + + fn iter() -> Self::Iterator { + List::::iter() + } + + fn drain() -> Self::Iterator { + List::::drain() + } + + fn appender() -> Self::Appender { + List::::appender() + } +} + +/// Generates a unique storage prefix for each instance of the pallet. +pub struct ListPrefix(core::marker::PhantomData<(T, I)>); + +impl, I: 'static> StorageInstance for ListPrefix { + fn pallet_prefix() -> &'static str { + crate::Pallet::::name() + } + + const STORAGE_PREFIX: &'static str = "paged_list"; +} diff --git a/frame/paged-list/src/mock.rs b/frame/paged-list/src/mock.rs new file mode 100644 index 0000000000000..390b4a8530dce --- /dev/null +++ b/frame/paged-list/src/mock.rs @@ -0,0 +1,94 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Helpers for tests. + +#![cfg(feature = "std")] + +use crate::{paged_list::StoragePagedListMeta, Config, ListPrefix}; +use frame_support::traits::{ConstU16, ConstU64}; +use sp_core::H256; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; + +type Block = frame_system::mocking::MockBlock; + +// Configure a mock runtime to test the pallet. 
+frame_support::construct_runtime!( + pub enum Test { + System: frame_system, + PagedList: crate, + PagedList2: crate::, + } +); + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = ConstU16<42>; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +frame_support::parameter_types! { + pub storage ValuesPerNewPage: u32 = 5; + pub const MaxPages: Option = Some(20); +} + +impl crate::Config for Test { + type Value = u32; + type ValuesPerNewPage = ValuesPerNewPage; +} + +impl crate::Config for Test { + type Value = u32; + type ValuesPerNewPage = ValuesPerNewPage; +} + +pub type MetaOf = + StoragePagedListMeta, ::Value, ::ValuesPerNewPage>; + +/// Build genesis storage according to the mock runtime. +pub fn new_test_ext() -> sp_io::TestExternalities { + frame_system::GenesisConfig::::default().build_storage().unwrap().into() +} + +/// Run this closure in test externalities. +pub fn test_closure(f: impl FnOnce() -> R) -> R { + let mut ext = new_test_ext(); + ext.execute_with(f) +} diff --git a/frame/paged-list/src/paged_list.rs b/frame/paged-list/src/paged_list.rs new file mode 100644 index 0000000000000..37ebe80d93448 --- /dev/null +++ b/frame/paged-list/src/paged_list.rs @@ -0,0 +1,581 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Paged storage list. + +// links are better than no links - even when they refer to private stuff. +#![allow(rustdoc::private_intra_doc_links)] +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(missing_docs)] +#![deny(unsafe_code)] + +use codec::{Decode, Encode, EncodeLike, FullCodec}; +use core::marker::PhantomData; +use frame_support::{ + defensive, + storage::StoragePrefixedContainer, + traits::{Get, StorageInstance}, + CloneNoBound, DebugNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, +}; +use sp_runtime::traits::Saturating; +use sp_std::prelude::*; + +pub type PageIndex = u32; +pub type ValueIndex = u32; + +/// A paginated storage list. +/// +/// # Motivation +/// +/// This type replaces `StorageValue>` in situations where only iteration and appending is +/// needed. There are a few places where this is the case. A paginated structure reduces the memory +/// usage when a storage transactions needs to be rolled back. The main motivation is therefore a +/// reduction of runtime memory on storage transaction rollback. Should be configured such that the +/// size of a page is about 64KiB. This can only be ensured when `V` implements `MaxEncodedLen`. +/// +/// # Implementation +/// +/// The metadata of this struct is stored in [`StoragePagedListMeta`]. The data is stored in +/// [`Page`]s. 
+/// +/// Each [`Page`] holds at most `ValuesPerNewPage` values in its `values` vector. The last page is +/// the only one that could have less than `ValuesPerNewPage` values. +/// **Iteration** happens by starting +/// at [`first_page`][StoragePagedListMeta::first_page]/ +/// [`first_value_offset`][StoragePagedListMeta::first_value_offset] and incrementing these indices +/// as long as there are elements in the page and there are pages in storage. All elements of a page +/// are loaded once a page is read from storage. Iteration then happens on the cached elements. This +/// reduces the number of storage `read` calls on the overlay. **Appending** to the list happens by +/// appending to the last page by utilizing [`sp_io::storage::append`]. It allows to directly extend +/// the elements of `values` vector of the page without loading the whole vector from storage. A new +/// page is instantiated once [`Page::next`] overflows `ValuesPerNewPage`. Its vector will also be +/// created through [`sp_io::storage::append`]. **Draining** advances the internal indices identical +/// to Iteration. It additionally persists the increments to storage and thereby 'drains' elements. +/// Completely drained pages are deleted from storage. +/// +/// # Further Observations +/// +/// - The encoded layout of a page is exactly its [`Page::values`]. The [`Page::next`] offset is +/// stored in the [`StoragePagedListMeta`] instead. There is no particular reason for this, +/// besides having all management state handy in one location. +/// - The PoV complexity of iterating compared to a `StorageValue>` is improved for +/// "shortish" iterations and worse for total iteration. The append complexity is identical in the +/// asymptotic case when using an `Appender`, and worse in all. For example when appending just +/// one value. +/// - It does incur a read overhead on the host side as compared to a `StorageValue>`. 
+pub struct StoragePagedList { + _phantom: PhantomData<(Prefix, Value, ValuesPerNewPage)>, +} + +/// The state of a [`StoragePagedList`]. +/// +/// This struct doubles as [`frame_support::storage::StorageList::Appender`]. +#[derive( + Encode, Decode, CloneNoBound, PartialEqNoBound, EqNoBound, DebugNoBound, DefaultNoBound, +)] +// todo ignore scale bounds +pub struct StoragePagedListMeta { + /// The first page that could contain a value. + /// + /// Can be >0 when pages were deleted. + pub first_page: PageIndex, + /// The first index inside `first_page` that could contain a value. + /// + /// Can be >0 when values were deleted. + pub first_value_offset: ValueIndex, + + /// The last page that could contain data. + /// + /// Appending starts at this page index. + pub last_page: PageIndex, + /// The last value inside `last_page` that could contain a value. + /// + /// Appending starts at this index. If the page does not hold a value at this index, then the + /// whole list is empty. The only case where this can happen is when both are `0`. + pub last_page_len: ValueIndex, + + _phantom: PhantomData<(Prefix, Value, ValuesPerNewPage)>, +} + +impl frame_support::storage::StorageAppender + for StoragePagedListMeta +where + Prefix: StorageInstance, + Value: FullCodec, + ValuesPerNewPage: Get, +{ + fn append(&mut self, item: EncodeLikeValue) + where + EncodeLikeValue: EncodeLike, + { + self.append_one(item); + } +} + +impl StoragePagedListMeta +where + Prefix: StorageInstance, + Value: FullCodec, + ValuesPerNewPage: Get, +{ + pub fn from_storage() -> Option { + let key = Self::key(); + + sp_io::storage::get(&key).and_then(|raw| Self::decode(&mut &raw[..]).ok()) + } + + pub fn key() -> Vec { + meta_key::() + } + + pub fn append_one(&mut self, item: EncodeLikeValue) + where + EncodeLikeValue: EncodeLike, + { + // Note: we use >= here in case someone decreased it in a runtime upgrade. 
+ if self.last_page_len >= ValuesPerNewPage::get() { + self.last_page.saturating_inc(); + self.last_page_len = 0; + } + let key = page_key::(self.last_page); + self.last_page_len.saturating_inc(); + sp_io::storage::append(&key, item.encode()); + self.store(); + } + + pub fn store(&self) { + let key = Self::key(); + self.using_encoded(|enc| sp_io::storage::set(&key, enc)); + } + + pub fn reset(&mut self) { + *self = Default::default(); + Self::delete(); + } + + pub fn delete() { + sp_io::storage::clear(&Self::key()); + } +} + +/// A page that was decoded from storage and caches its values. +pub struct Page { + /// The index of the page. + index: PageIndex, + /// The remaining values of the page, to be drained by [`Page::next`]. + values: sp_std::iter::Skip>, +} + +impl Page { + /// Read the page with `index` from storage and assume the first value at `value_index`. + pub fn from_storage( + index: PageIndex, + value_index: ValueIndex, + ) -> Option { + let key = page_key::(index); + let values = sp_io::storage::get(&key) + .and_then(|raw| sp_std::vec::Vec::::decode(&mut &raw[..]).ok())?; + if values.is_empty() { + // Dont create empty pages. + return None + } + let values = values.into_iter().skip(value_index as usize); + + Some(Self { index, values }) + } + + /// Whether no more values can be read from this page. + pub fn is_eof(&self) -> bool { + self.values.len() == 0 + } + + /// Delete this page from storage. + pub fn delete(&self) { + delete_page::(self.index); + } +} + +/// Delete a page with `index` from storage. +// Does not live under `Page` since it does not require the `Value` generic. +pub(crate) fn delete_page(index: PageIndex) { + let key = page_key::(index); + sp_io::storage::clear(&key); +} + +/// Storage key of a page with `index`. +// Does not live under `Page` since it does not require the `Value` generic. 
+pub(crate) fn page_key(index: PageIndex) -> Vec { + (StoragePagedListPrefix::::final_prefix(), b"page", index).encode() +} + +pub(crate) fn meta_key() -> Vec { + (StoragePagedListPrefix::::final_prefix(), b"meta").encode() +} + +impl Iterator for Page { + type Item = V; + + fn next(&mut self) -> Option { + self.values.next() + } +} + +/// Iterates over values of a [`StoragePagedList`]. +/// +/// Can optionally drain the iterated values. +pub struct StoragePagedListIterator { + // Design: we put the Page into the iterator to have fewer storage look-ups. Yes, these + // look-ups would be cached anyway, but bugging the overlay on each `.next` call still seems + // like a poor trade-off than caching it in the iterator directly. Iterating and modifying is + // not allowed at the same time anyway, just like with maps. Note: if Page is empty then + // the iterator did not find any data upon setup or ran out of pages. + page: Option>, + drain: bool, + meta: StoragePagedListMeta, +} + +impl StoragePagedListIterator +where + Prefix: StorageInstance, + Value: FullCodec, + ValuesPerNewPage: Get, +{ + /// Read self from the storage. 
+ pub fn from_meta( + meta: StoragePagedListMeta, + drain: bool, + ) -> Self { + let page = Page::::from_storage::(meta.first_page, meta.first_value_offset); + Self { page, drain, meta } + } +} + +impl Iterator + for StoragePagedListIterator +where + Prefix: StorageInstance, + Value: FullCodec, + ValuesPerNewPage: Get, +{ + type Item = Value; + + fn next(&mut self) -> Option { + let page = self.page.as_mut()?; + let value = match page.next() { + Some(value) => value, + None => { + defensive!("There are no empty pages in storage; nuking the list"); + self.meta.reset(); + self.page = None; + return None + }, + }; + + if page.is_eof() { + if self.drain { + page.delete::(); + self.meta.first_value_offset = 0; + self.meta.first_page.saturating_inc(); + } + + debug_assert!(!self.drain || self.meta.first_page == page.index + 1); + self.page = Page::from_storage::(page.index.saturating_add(1), 0); + if self.drain { + if self.page.is_none() { + self.meta.reset(); + } else { + self.meta.store(); + } + } + } else { + if self.drain { + self.meta.first_value_offset.saturating_inc(); + self.meta.store(); + } + } + Some(value) + } +} + +impl frame_support::storage::StorageList + for StoragePagedList +where + Prefix: StorageInstance, + Value: FullCodec, + ValuesPerNewPage: Get, +{ + type Iterator = StoragePagedListIterator; + type Appender = StoragePagedListMeta; + + fn iter() -> Self::Iterator { + StoragePagedListIterator::from_meta(Self::read_meta(), false) + } + + fn drain() -> Self::Iterator { + StoragePagedListIterator::from_meta(Self::read_meta(), true) + } + + fn appender() -> Self::Appender { + Self::appender() + } +} + +impl StoragePagedList +where + Prefix: StorageInstance, + Value: FullCodec, + ValuesPerNewPage: Get, +{ + fn read_meta() -> StoragePagedListMeta { + // Use default here to not require a setup migration. + StoragePagedListMeta::from_storage().unwrap_or_default() + } + + /// Provides a fast append iterator. 
+ /// + /// The list should not be modified while appending. Also don't call it recursively. + fn appender() -> StoragePagedListMeta { + Self::read_meta() + } + + /// Return the elements of the list. + #[cfg(test)] + fn as_vec() -> Vec { + >::iter().collect() + } + + /// Return and remove the elements of the list. + #[cfg(test)] + fn as_drained_vec() -> Vec { + >::drain().collect() + } +} + +/// Provides the final prefix for a [`StoragePagedList`]. +/// +/// It solely exists so that when re-using it from the iterator and meta struct, none of the un-used +/// generics bleed through. Otherwise when only having the `StoragePrefixedContainer` implementation +/// on the list directly, the iterator and metadata need to muster *all* generics, even the ones +/// that are completely useless for prefix calculation. +struct StoragePagedListPrefix(PhantomData); + +impl frame_support::storage::StoragePrefixedContainer for StoragePagedListPrefix +where + Prefix: StorageInstance, +{ + fn module_prefix() -> &'static [u8] { + Prefix::pallet_prefix().as_bytes() + } + + fn storage_prefix() -> &'static [u8] { + Prefix::STORAGE_PREFIX.as_bytes() + } +} + +impl frame_support::storage::StoragePrefixedContainer + for StoragePagedList +where + Prefix: StorageInstance, + Value: FullCodec, + ValuesPerNewPage: Get, +{ + fn module_prefix() -> &'static [u8] { + StoragePagedListPrefix::::module_prefix() + } + + fn storage_prefix() -> &'static [u8] { + StoragePagedListPrefix::::storage_prefix() + } +} + +/// Prelude for (doc)tests. +#[cfg(feature = "std")] +#[allow(dead_code)] +pub(crate) mod mock { + pub use super::*; + pub use frame_support::{ + metadata_ir::{StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR}, + parameter_types, + storage::{types::ValueQuery, StorageList as _}, + StorageNoopGuard, + }; + pub use sp_io::{hashing::twox_128, TestExternalities}; + + parameter_types! 
{ + pub const ValuesPerNewPage: u32 = 5; + pub const MaxPages: Option = Some(20); + } + + pub struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "foo"; + } + + pub type List = StoragePagedList; +} + +#[cfg(test)] +mod tests { + use super::mock::*; + + #[test] + fn append_works() { + TestExternalities::default().execute_with(|| { + List::append_many(0..1000); + assert_eq!(List::as_vec(), (0..1000).collect::>()); + }); + } + + /// Draining all works. + #[test] + fn simple_drain_works() { + TestExternalities::default().execute_with(|| { + let _g = StorageNoopGuard::default(); // All in all a No-Op + List::append_many(0..1000); + + assert_eq!(List::as_drained_vec(), (0..1000).collect::>()); + + assert_eq!(List::read_meta(), Default::default()); + + // all gone + assert_eq!(List::as_vec(), Vec::::new()); + // Need to delete the metadata manually. + StoragePagedListMeta::::delete(); + }); + } + + /// Drain half of the elements and iterator the rest. + #[test] + fn partial_drain_works() { + TestExternalities::default().execute_with(|| { + List::append_many(0..100); + + let vals = List::drain().take(50).collect::>(); + assert_eq!(vals, (0..50).collect::>()); + + let meta = List::read_meta(); + // Will switch over to `10/0`, but will in the next call. + assert_eq!((meta.first_page, meta.first_value_offset), (10, 0)); + + // 50 gone, 50 to go + assert_eq!(List::as_vec(), (50..100).collect::>()); + }); + } + + /// Draining, appending and iterating work together. + #[test] + fn drain_append_iter_works() { + TestExternalities::default().execute_with(|| { + for r in 1..=100 { + List::append_many(0..12); + List::append_many(0..12); + + let dropped = List::drain().take(12).collect::>(); + assert_eq!(dropped, (0..12).collect::>()); + + assert_eq!(List::as_vec(), (0..12).cycle().take(r * 12).collect::>()); + } + }); + } + + /// Pages are removed ASAP. 
+ #[test] + fn drain_eager_page_removal() { + TestExternalities::default().execute_with(|| { + List::append_many(0..9); + + assert!(sp_io::storage::exists(&page_key::(0))); + assert!(sp_io::storage::exists(&page_key::(1))); + + assert_eq!(List::drain().take(5).count(), 5); + // Page 0 is eagerly removed. + assert!(!sp_io::storage::exists(&page_key::(0))); + assert!(sp_io::storage::exists(&page_key::(1))); + }); + } + + /// Appending encodes pages as `Vec`. + #[test] + fn append_storage_layout() { + TestExternalities::default().execute_with(|| { + List::append_many(0..9); + + let key = page_key::(0); + let raw = sp_io::storage::get(&key).expect("Page should be present"); + let as_vec = Vec::::decode(&mut &raw[..]).unwrap(); + assert_eq!(as_vec.len(), 5, "First page contains 5"); + + let key = page_key::(1); + let raw = sp_io::storage::get(&key).expect("Page should be present"); + let as_vec = Vec::::decode(&mut &raw[..]).unwrap(); + assert_eq!(as_vec.len(), 4, "Second page contains 4"); + + let meta = sp_io::storage::get(&meta_key::()).expect("Meta should be present"); + let meta: StoragePagedListMeta = + Decode::decode(&mut &meta[..]).unwrap(); + assert_eq!(meta.first_page, 0); + assert_eq!(meta.first_value_offset, 0); + assert_eq!(meta.last_page, 1); + assert_eq!(meta.last_page_len, 4); + + let page = Page::::from_storage::(0, 0).unwrap(); + assert_eq!(page.index, 0); + assert_eq!(page.values.count(), 5); + + let page = Page::::from_storage::(1, 0).unwrap(); + assert_eq!(page.index, 1); + assert_eq!(page.values.count(), 4); + }); + } + + #[test] + fn page_key_correct() { + let got = page_key::(0); + let pallet_prefix = StoragePagedListPrefix::::final_prefix(); + let want = (pallet_prefix, b"page", 0).encode(); + + assert_eq!(want.len(), 32 + 4 + 4); + assert!(want.starts_with(&pallet_prefix[..])); + assert_eq!(got, want); + } + + #[test] + fn meta_key_correct() { + let got = meta_key::(); + let pallet_prefix = StoragePagedListPrefix::::final_prefix(); + let want = 
(pallet_prefix, b"meta").encode(); + + assert_eq!(want.len(), 32 + 4); + assert!(want.starts_with(&pallet_prefix[..])); + assert_eq!(got, want); + } + + #[test] + fn peekable_drain_also_deletes() { + TestExternalities::default().execute_with(|| { + List::append_many(0..10); + + let mut iter = List::drain().peekable(); + assert_eq!(iter.peek(), Some(&0)); + // `peek` does remove one element... + assert_eq!(List::iter().count(), 9); + }); + } +} diff --git a/frame/paged-list/src/tests.rs b/frame/paged-list/src/tests.rs new file mode 100644 index 0000000000000..becb4b23508ef --- /dev/null +++ b/frame/paged-list/src/tests.rs @@ -0,0 +1,108 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Mostly pallet doc-tests. Real tests are in [`super::paged_list`] and crate +//! `pallet-paged-list-fuzzer`. 
+ +#![cfg(test)] + +use crate::{mock::*, *}; +use frame_support::storage::{StorageList, StoragePrefixedContainer}; + +#[docify::export] +#[test] +fn append_one_works() { + test_closure(|| { + PagedList::append_one(1); + + assert_eq!(PagedList::iter().collect::>(), vec![1]); + }); +} + +#[docify::export] +#[test] +fn append_many_works() { + test_closure(|| { + PagedList::append_many(0..3); + + assert_eq!(PagedList::iter().collect::>(), vec![0, 1, 2]); + }); +} + +#[docify::export] +#[test] +fn appender_works() { + use frame_support::storage::StorageAppender; + test_closure(|| { + let mut appender = PagedList::appender(); + + appender.append(0); + appender.append(1); // Repeated calls are fine here. + appender.append_many(2..4); + + assert_eq!(PagedList::iter().collect::>(), vec![0, 1, 2, 3]); + }); +} + +#[docify::export] +#[test] +fn iter_works() { + test_closure(|| { + PagedList::append_many(0..10); + let mut iter = PagedList::iter(); + + assert_eq!(iter.next(), Some(0)); + assert_eq!(iter.next(), Some(1)); + assert_eq!(iter.collect::>(), (2..10).collect::>()); + }); +} + +#[docify::export] +#[test] +fn drain_works() { + test_closure(|| { + PagedList::append_many(0..3); + PagedList::drain().next(); + assert_eq!(PagedList::iter().collect::>(), vec![1, 2], "0 is drained"); + PagedList::drain().peekable().peek(); + assert_eq!(PagedList::iter().collect::>(), vec![2], "Peeking removed 1"); + }); +} + +#[test] +fn iter_independent_works() { + test_closure(|| { + PagedList::append_many(0..1000); + PagedList2::append_many(0..1000); + + assert_eq!(PagedList::iter().collect::>(), (0..1000).collect::>()); + assert_eq!(PagedList::iter().collect::>(), (0..1000).collect::>()); + + // drain + assert_eq!(PagedList::drain().collect::>(), (0..1000).collect::>()); + assert_eq!(PagedList2::iter().collect::>(), (0..1000).collect::>()); + + assert_eq!(PagedList::iter().count(), 0); + }); +} + +#[test] +fn prefix_distinct() { + let p1 = List::::final_prefix(); + let p2 = 
List::::final_prefix(); + assert_ne!(p1, p2); +} diff --git a/frame/preimage/Cargo.toml b/frame/preimage/Cargo.toml index 57dda88b1e70a..b942823a2d26b 100644 --- a/frame/preimage/Cargo.toml +++ b/frame/preimage/Cargo.toml @@ -9,20 +9,20 @@ repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for storing preimages of hashes" [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-core = { version = "7.0.0", default-features = false, optional = true, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, optional = true, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } log = { version = "0.4.17", default-features = false } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } +sp-core = { version = 
"21.0.0", default-features = false, path = "../../primitives/core" } [features] default = ["std"] @@ -30,6 +30,9 @@ runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" ] std = [ "codec/std", @@ -42,7 +45,11 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std" ] try-runtime = [ "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] diff --git a/frame/preimage/src/lib.rs b/frame/preimage/src/lib.rs index 60208424db953..5ab1e7643b2e7 100644 --- a/frame/preimage/src/lib.rs +++ b/frame/preimage/src/lib.rs @@ -108,7 +108,7 @@ pub mod pallet { #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] diff --git a/frame/preimage/src/migration.rs b/frame/preimage/src/migration.rs index be352201da6cd..46e555498cd2d 100644 --- a/frame/preimage/src/migration.rs +++ b/frame/preimage/src/migration.rs @@ -24,6 +24,11 @@ use frame_support::{ }; use sp_std::collections::btree_map::BTreeMap; +#[cfg(feature = "try-runtime")] +use frame_support::ensure; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + /// The log target. 
const TARGET: &'static str = "runtime::preimage::migration::v1"; @@ -78,8 +83,8 @@ pub mod v1 { impl OnRuntimeUpgrade for Migration { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - assert_eq!(StorageVersion::get::>(), 0, "can only upgrade from version 0"); + fn pre_upgrade() -> Result, TryRuntimeError> { + ensure!(StorageVersion::get::>() == 0, "can only upgrade from version 0"); let images = v0::image_count::().expect("v0 storage corrupted"); log::info!(target: TARGET, "Migrating {} images", &images); @@ -148,7 +153,7 @@ pub mod v1 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), &'static str> { + fn post_upgrade(state: Vec) -> DispatchResult { let old_images: u32 = Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); let new_images = image_count::().expect("V1 storage corrupted"); @@ -161,7 +166,7 @@ pub mod v1 { old_images ); } - assert_eq!(StorageVersion::get::>(), 1, "must upgrade"); + ensure!(StorageVersion::get::>() == 1, "must upgrade"); Ok(()) } } diff --git a/frame/preimage/src/mock.rs b/frame/preimage/src/mock.rs index 5054a77a8123f..2fb9f36dec454 100644 --- a/frame/preimage/src/mock.rs +++ b/frame/preimage/src/mock.rs @@ -28,18 +28,14 @@ use frame_support::{ use frame_system::EnsureSignedBy; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, Balances: pallet_balances, @@ -54,13 +50,12 @@ impl frame_system::Config for Test { type DbWeight = RocksDbWeight; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; 
type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -86,7 +81,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -104,7 +99,7 @@ impl Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let balances = pallet_balances::GenesisConfig:: { balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)], }; diff --git a/frame/preimage/src/weights.rs b/frame/preimage/src/weights.rs index 2177309db1612..41e58a1027800 100644 --- a/frame/preimage/src/weights.rs +++ b/frame/preimage/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_preimage //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_preimage +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_preimage. pub trait WeightInfo { @@ -74,10 +78,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `143` // Estimated: `3556` - // Minimum execution time: 31_578_000 picoseconds. - Weight::from_parts(31_955_000, 3556) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_395, 0).saturating_mul(s.into())) + // Minimum execution time: 30_479_000 picoseconds. + Weight::from_parts(23_381_775, 3556) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_670, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -90,10 +94,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 17_017_000 picoseconds. - Weight::from_parts(17_549_000, 3556) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_394, 0).saturating_mul(s.into())) + // Minimum execution time: 16_104_000 picoseconds. 
+ Weight::from_parts(18_393_879, 3556) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_669, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -106,10 +110,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_507_000 picoseconds. - Weight::from_parts(16_624_000, 3556) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_395, 0).saturating_mul(s.into())) + // Minimum execution time: 15_652_000 picoseconds. + Weight::from_parts(22_031_627, 3556) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_672, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -121,8 +125,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `289` // Estimated: `3556` - // Minimum execution time: 38_016_000 picoseconds. - Weight::from_parts(38_909_000, 3556) + // Minimum execution time: 37_148_000 picoseconds. + Weight::from_parts(40_247_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -134,8 +138,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 21_408_000 picoseconds. - Weight::from_parts(22_343_000, 3556) + // Minimum execution time: 19_909_000 picoseconds. + Weight::from_parts(21_572_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -145,8 +149,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `188` // Estimated: `3556` - // Minimum execution time: 20_035_000 picoseconds. - Weight::from_parts(20_639_000, 3556) + // Minimum execution time: 17_602_000 picoseconds. 
+ Weight::from_parts(18_899_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -156,8 +160,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 12_028_000 picoseconds. - Weight::from_parts(12_509_000, 3556) + // Minimum execution time: 11_253_000 picoseconds. + Weight::from_parts(11_667_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -167,8 +171,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3556` - // Minimum execution time: 13_568_000 picoseconds. - Weight::from_parts(14_161_000, 3556) + // Minimum execution time: 14_152_000 picoseconds. + Weight::from_parts(14_652_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -178,8 +182,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 8_538_000 picoseconds. - Weight::from_parts(8_933_000, 3556) + // Minimum execution time: 8_267_000 picoseconds. + Weight::from_parts(8_969_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -191,8 +195,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 20_692_000 picoseconds. - Weight::from_parts(21_770_000, 3556) + // Minimum execution time: 18_429_000 picoseconds. + Weight::from_parts(18_946_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -202,8 +206,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 8_572_000 picoseconds. 
- Weight::from_parts(8_795_000, 3556) + // Minimum execution time: 7_910_000 picoseconds. + Weight::from_parts(8_272_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -213,8 +217,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 8_266_000 picoseconds. - Weight::from_parts(8_721_000, 3556) + // Minimum execution time: 7_936_000 picoseconds. + Weight::from_parts(8_504_000, 3556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -231,10 +235,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `143` // Estimated: `3556` - // Minimum execution time: 31_578_000 picoseconds. - Weight::from_parts(31_955_000, 3556) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_395, 0).saturating_mul(s.into())) + // Minimum execution time: 30_479_000 picoseconds. + Weight::from_parts(23_381_775, 3556) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_670, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -247,10 +251,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 17_017_000 picoseconds. - Weight::from_parts(17_549_000, 3556) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_394, 0).saturating_mul(s.into())) + // Minimum execution time: 16_104_000 picoseconds. 
+ Weight::from_parts(18_393_879, 3556) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_669, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -263,10 +267,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_507_000 picoseconds. - Weight::from_parts(16_624_000, 3556) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_395, 0).saturating_mul(s.into())) + // Minimum execution time: 15_652_000 picoseconds. + Weight::from_parts(22_031_627, 3556) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_672, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -278,8 +282,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `289` // Estimated: `3556` - // Minimum execution time: 38_016_000 picoseconds. - Weight::from_parts(38_909_000, 3556) + // Minimum execution time: 37_148_000 picoseconds. + Weight::from_parts(40_247_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -291,8 +295,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 21_408_000 picoseconds. - Weight::from_parts(22_343_000, 3556) + // Minimum execution time: 19_909_000 picoseconds. + Weight::from_parts(21_572_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -302,8 +306,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `188` // Estimated: `3556` - // Minimum execution time: 20_035_000 picoseconds. - Weight::from_parts(20_639_000, 3556) + // Minimum execution time: 17_602_000 picoseconds. 
+ Weight::from_parts(18_899_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -313,8 +317,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 12_028_000 picoseconds. - Weight::from_parts(12_509_000, 3556) + // Minimum execution time: 11_253_000 picoseconds. + Weight::from_parts(11_667_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -324,8 +328,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3556` - // Minimum execution time: 13_568_000 picoseconds. - Weight::from_parts(14_161_000, 3556) + // Minimum execution time: 14_152_000 picoseconds. + Weight::from_parts(14_652_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -335,8 +339,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 8_538_000 picoseconds. - Weight::from_parts(8_933_000, 3556) + // Minimum execution time: 8_267_000 picoseconds. + Weight::from_parts(8_969_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -348,8 +352,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `144` // Estimated: `3556` - // Minimum execution time: 20_692_000 picoseconds. - Weight::from_parts(21_770_000, 3556) + // Minimum execution time: 18_429_000 picoseconds. + Weight::from_parts(18_946_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -359,8 +363,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 8_572_000 picoseconds. 
- Weight::from_parts(8_795_000, 3556) + // Minimum execution time: 7_910_000 picoseconds. + Weight::from_parts(8_272_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -370,8 +374,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 8_266_000 picoseconds. - Weight::from_parts(8_721_000, 3556) + // Minimum execution time: 7_936_000 picoseconds. + Weight::from_parts(8_504_000, 3556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index 065cbe5eba252..a32a31ce41ade 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -13,19 +13,19 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["max-encoded-len"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", 
default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-utility = { version = "4.0.0-dev", path = "../utility" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } [features] default = ["std"] @@ -38,10 +38,22 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "pallet-utility/std", + "sp-core/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-utility/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index 7244dd5f17472..e0d14163d21b2 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use crate::Pallet as Proxy; use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; -use frame_system::RawOrigin; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use sp_runtime::traits::Bounded; const SEED: u32 = 0; @@ -41,7 +41,7 @@ fn add_proxies(n: u32, maybe_who: Option) -> Result<(), RawOrigin::Signed(caller.clone()).into(), real, T::ProxyType::default(), - T::BlockNumber::zero(), + BlockNumberFor::::zero(), )?; } Ok(()) @@ -64,7 +64,7 @@ fn add_announcements( RawOrigin::Signed(real.clone()).into(), caller_lookup, T::ProxyType::default(), - T::BlockNumber::zero(), + BlockNumberFor::::zero(), )?; real }; @@ -187,7 +187,7 @@ benchmarks! 
{ RawOrigin::Signed(caller.clone()), real, T::ProxyType::default(), - T::BlockNumber::zero() + BlockNumberFor::::zero() ) verify { let (proxies, _) = Proxies::::get(caller); @@ -202,7 +202,7 @@ benchmarks! { RawOrigin::Signed(caller.clone()), delegate, T::ProxyType::default(), - T::BlockNumber::zero() + BlockNumberFor::::zero() ) verify { let (proxies, _) = Proxies::::get(caller); @@ -224,7 +224,7 @@ benchmarks! { }: _( RawOrigin::Signed(caller.clone()), T::ProxyType::default(), - T::BlockNumber::zero(), + BlockNumberFor::::zero(), 0 ) verify { @@ -246,7 +246,7 @@ benchmarks! { Pallet::::create_pure( RawOrigin::Signed(whitelisted_caller()).into(), T::ProxyType::default(), - T::BlockNumber::zero(), + BlockNumberFor::::zero(), 0 )?; let height = system::Pallet::::block_number(); diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 023d0253519f7..33e9fcfade35a 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -40,7 +40,8 @@ use frame_support::{ traits::{Currency, Get, InstanceFilter, IsSubType, IsType, OriginTrait, ReservableCurrency}, RuntimeDebug, }; -use frame_system::{self as system}; +use frame_system::{self as system, ensure_signed, pallet_prelude::BlockNumberFor}; +pub use pallet::*; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::{ @@ -50,8 +51,6 @@ use sp_runtime::{ use sp_std::prelude::*; pub use weights::WeightInfo; -pub use pallet::*; - type CallHashOf = <::CallHasher as Hash>::Output; type BalanceOf = @@ -228,7 +227,7 @@ pub mod pallet { origin: OriginFor, delegate: AccountIdLookupOf, proxy_type: T::ProxyType, - delay: T::BlockNumber, + delay: BlockNumberFor, ) -> DispatchResult { let who = ensure_signed(origin)?; let delegate = T::Lookup::lookup(delegate)?; @@ -248,7 +247,7 @@ pub mod pallet { origin: OriginFor, delegate: AccountIdLookupOf, proxy_type: T::ProxyType, - delay: T::BlockNumber, + delay: BlockNumberFor, ) -> DispatchResult { let who = ensure_signed(origin)?; let delegate = 
T::Lookup::lookup(delegate)?; @@ -265,9 +264,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::remove_proxies(T::MaxProxies::get()))] pub fn remove_proxies(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; - let (_, old_deposit) = Proxies::::take(&who); - T::Currency::unreserve(&who, old_deposit); - + Self::remove_all_proxy_delegates(&who); Ok(()) } @@ -294,7 +291,7 @@ pub mod pallet { pub fn create_pure( origin: OriginFor, proxy_type: T::ProxyType, - delay: T::BlockNumber, + delay: BlockNumberFor, index: u16, ) -> DispatchResult { let who = ensure_signed(origin)?; @@ -344,7 +341,7 @@ pub mod pallet { spawner: AccountIdLookupOf, proxy_type: T::ProxyType, index: u16, - #[pallet::compact] height: T::BlockNumber, + #[pallet::compact] height: BlockNumberFor, #[pallet::compact] ext_index: u32, ) -> DispatchResult { let who = ensure_signed(origin)?; @@ -538,14 +535,14 @@ pub mod pallet { delegator: T::AccountId, delegatee: T::AccountId, proxy_type: T::ProxyType, - delay: T::BlockNumber, + delay: BlockNumberFor, }, /// A proxy was removed. 
ProxyRemoved { delegator: T::AccountId, delegatee: T::AccountId, proxy_type: T::ProxyType, - delay: T::BlockNumber, + delay: BlockNumberFor, }, } @@ -578,7 +575,10 @@ pub mod pallet { Twox64Concat, T::AccountId, ( - BoundedVec, T::MaxProxies>, + BoundedVec< + ProxyDefinition>, + T::MaxProxies, + >, BalanceOf, ), ValueQuery, @@ -592,7 +592,7 @@ pub mod pallet { Twox64Concat, T::AccountId, ( - BoundedVec, T::BlockNumber>, T::MaxPending>, + BoundedVec, BlockNumberFor>, T::MaxPending>, BalanceOf, ), ValueQuery, @@ -615,7 +615,7 @@ impl Pallet { who: &T::AccountId, proxy_type: &T::ProxyType, index: u16, - maybe_when: Option<(T::BlockNumber, u32)>, + maybe_when: Option<(BlockNumberFor, u32)>, ) -> T::AccountId { let (height, ext_index) = maybe_when.unwrap_or_else(|| { ( @@ -641,7 +641,7 @@ impl Pallet { delegator: &T::AccountId, delegatee: T::AccountId, proxy_type: T::ProxyType, - delay: T::BlockNumber, + delay: BlockNumberFor, ) -> DispatchResult { ensure!(delegator != &delegatee, Error::::NoSelfProxy); Proxies::::try_mutate(delegator, |(ref mut proxies, ref mut deposit)| { @@ -681,7 +681,7 @@ impl Pallet { delegator: &T::AccountId, delegatee: T::AccountId, proxy_type: T::ProxyType, - delay: T::BlockNumber, + delay: BlockNumberFor, ) -> DispatchResult { Proxies::::try_mutate_exists(delegator, |x| { let (mut proxies, old_deposit) = x.take().ok_or(Error::::NotFound)?; @@ -737,7 +737,7 @@ impl Pallet { } fn edit_announcements< - F: FnMut(&Announcement, T::BlockNumber>) -> bool, + F: FnMut(&Announcement, BlockNumberFor>) -> bool, >( delegate: &T::AccountId, f: F, @@ -763,8 +763,8 @@ impl Pallet { real: &T::AccountId, delegate: &T::AccountId, force_proxy_type: Option, - ) -> Result, DispatchError> { - let f = |x: &ProxyDefinition| -> bool { + ) -> Result>, DispatchError> { + let f = |x: &ProxyDefinition>| -> bool { &x.delegate == delegate && force_proxy_type.as_ref().map_or(true, |y| &x.proxy_type == y) }; @@ -772,7 +772,7 @@ impl Pallet { } fn do_proxy( - def: 
ProxyDefinition, + def: ProxyDefinition>, real: T::AccountId, call: ::RuntimeCall, ) { @@ -799,4 +799,13 @@ impl Pallet { let e = call.dispatch(origin); Self::deposit_event(Event::ProxyExecuted { result: e.map(|_| ()).map_err(|e| e.error) }); } + + /// Removes all proxy delegates for a given delegator. + /// + /// Parameters: + /// - `delegator`: The delegator account. + pub fn remove_all_proxy_delegates(delegator: &T::AccountId) { + let (_, old_deposit) = Proxies::::take(&delegator); + T::Currency::unreserve(&delegator, old_deposit); + } } diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index f3771083c4dd4..4b055a3935b95 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -24,75 +24,50 @@ use super::*; use crate as proxy; use codec::{Decode, Encode}; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, derive_impl, dispatch::DispatchError, traits::{ConstU32, ConstU64, Contains}, RuntimeDebug, }; use sp_core::H256; -use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, -}; +use sp_runtime::{traits::BlakeTwo256, BuildStorage}; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Proxy: proxy::{Pallet, Call, Storage, Event}, Utility: pallet_utility::{Pallet, Call, Event}, } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = BaseFilter; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); + type Block = Block; + type BlockHashCount = ConstU64<250>; 
type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; type RuntimeCall = RuntimeCall; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); type OnSetCode = (); - type MaxConsumers = ConstU32<16>; + + type BaseCallFilter = BaseFilter; + type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = (); + type ReserveIdentifier = [u8; 8]; type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type HoldIdentifier = (); - type MaxHolds = (); + type ExistentialDeposit = ConstU64<1>; } + impl pallet_utility::Config for Test { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; @@ -174,7 +149,7 @@ use pallet_utility::{Call as UtilityCall, Event as UtilityEvent}; type SystemError = frame_system::Error; pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 3)], } diff --git a/frame/proxy/src/weights.rs b/frame/proxy/src/weights.rs index 5a6352fc7ed7f..f30fe73d27ae6 100644 --- a/frame/proxy/src/weights.rs +++ 
b/frame/proxy/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_proxy //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_proxy +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_proxy. pub trait WeightInfo { @@ -70,10 +74,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 16_542_000 picoseconds. - Weight::from_parts(17_131_651, 4706) - // Standard Error: 1_279 - .saturating_add(Weight::from_parts(31_622, 0).saturating_mul(p.into())) + // Minimum execution time: 15_182_000 picoseconds. 
+ Weight::from_parts(15_919_146, 4706) + // Standard Error: 1_586 + .saturating_add(Weight::from_parts(31_768, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Proxy Proxies (r:1 w:0) @@ -88,12 +92,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `488 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 41_702_000 picoseconds. - Weight::from_parts(41_868_091, 5698) - // Standard Error: 3_771 - .saturating_add(Weight::from_parts(135_604, 0).saturating_mul(a.into())) - // Standard Error: 3_896 - .saturating_add(Weight::from_parts(32_615, 0).saturating_mul(p.into())) + // Minimum execution time: 40_256_000 picoseconds. + Weight::from_parts(40_373_648, 5698) + // Standard Error: 3_978 + .saturating_add(Weight::from_parts(166_936, 0).saturating_mul(a.into())) + // Standard Error: 4_110 + .saturating_add(Weight::from_parts(54_329, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -103,14 +107,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. - fn remove_announcement(a: u32, _p: u32, ) -> Weight { + fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 25_432_000 picoseconds. - Weight::from_parts(26_301_674, 5698) - // Standard Error: 1_413 - .saturating_add(Weight::from_parts(167_176, 0).saturating_mul(a.into())) + // Minimum execution time: 25_040_000 picoseconds. 
+ Weight::from_parts(25_112_188, 5698) + // Standard Error: 2_143 + .saturating_add(Weight::from_parts(189_027, 0).saturating_mul(a.into())) + // Standard Error: 2_214 + .saturating_add(Weight::from_parts(26_683, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -124,12 +130,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `403 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 25_280_000 picoseconds. - Weight::from_parts(26_099_549, 5698) - // Standard Error: 1_458 - .saturating_add(Weight::from_parts(168_724, 0).saturating_mul(a.into())) - // Standard Error: 1_507 - .saturating_add(Weight::from_parts(2_212, 0).saturating_mul(p.into())) + // Minimum execution time: 24_884_000 picoseconds. + Weight::from_parts(25_359_291, 5698) + // Standard Error: 2_019 + .saturating_add(Weight::from_parts(181_470, 0).saturating_mul(a.into())) + // Standard Error: 2_086 + .saturating_add(Weight::from_parts(17_725, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -145,12 +151,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `420 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 35_889_000 picoseconds. - Weight::from_parts(37_535_424, 5698) - // Standard Error: 3_899 - .saturating_add(Weight::from_parts(138_757, 0).saturating_mul(a.into())) - // Standard Error: 4_028 - .saturating_add(Weight::from_parts(46_196, 0).saturating_mul(p.into())) + // Minimum execution time: 35_039_000 picoseconds. 
+ Weight::from_parts(36_727_868, 5698) + // Standard Error: 4_463 + .saturating_add(Weight::from_parts(167_060, 0).saturating_mul(a.into())) + // Standard Error: 4_611 + .saturating_add(Weight::from_parts(59_836, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -161,10 +167,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 26_876_000 picoseconds. - Weight::from_parts(27_356_694, 4706) - // Standard Error: 1_437 - .saturating_add(Weight::from_parts(68_994, 0).saturating_mul(p.into())) + // Minimum execution time: 25_697_000 picoseconds. + Weight::from_parts(26_611_090, 4706) + // Standard Error: 2_306 + .saturating_add(Weight::from_parts(85_165, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -175,10 +181,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 26_655_000 picoseconds. - Weight::from_parts(27_726_692, 4706) - // Standard Error: 1_980 - .saturating_add(Weight::from_parts(55_932, 0).saturating_mul(p.into())) + // Minimum execution time: 25_638_000 picoseconds. + Weight::from_parts(26_904_510, 4706) + // Standard Error: 2_669 + .saturating_add(Weight::from_parts(61_668, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -189,22 +195,24 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 23_716_000 picoseconds. 
- Weight::from_parts(24_660_737, 4706) - // Standard Error: 1_400 - .saturating_add(Weight::from_parts(31_679, 0).saturating_mul(p.into())) + // Minimum execution time: 22_737_000 picoseconds. + Weight::from_parts(23_618_441, 4706) + // Standard Error: 1_729 + .saturating_add(Weight::from_parts(44_009, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Proxy Proxies (r:1 w:1) /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) /// The range of component `p` is `[1, 31]`. - fn create_pure(_p: u32, ) -> Weight { + fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `173` // Estimated: `4706` - // Minimum execution time: 28_233_000 picoseconds. - Weight::from_parts(29_602_422, 4706) + // Minimum execution time: 27_364_000 picoseconds. + Weight::from_parts(28_632_271, 4706) + // Standard Error: 1_613 + .saturating_add(Weight::from_parts(2_453, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -215,10 +223,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `198 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 24_759_000 picoseconds. - Weight::from_parts(25_533_053, 4706) - // Standard Error: 1_254 - .saturating_add(Weight::from_parts(36_331, 0).saturating_mul(p.into())) + // Minimum execution time: 23_552_000 picoseconds. + Weight::from_parts(24_874_553, 4706) + // Standard Error: 1_919 + .saturating_add(Weight::from_parts(38_799, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -233,10 +241,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 16_542_000 picoseconds. 
- Weight::from_parts(17_131_651, 4706) - // Standard Error: 1_279 - .saturating_add(Weight::from_parts(31_622, 0).saturating_mul(p.into())) + // Minimum execution time: 15_182_000 picoseconds. + Weight::from_parts(15_919_146, 4706) + // Standard Error: 1_586 + .saturating_add(Weight::from_parts(31_768, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Proxy Proxies (r:1 w:0) @@ -251,12 +259,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `488 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 41_702_000 picoseconds. - Weight::from_parts(41_868_091, 5698) - // Standard Error: 3_771 - .saturating_add(Weight::from_parts(135_604, 0).saturating_mul(a.into())) - // Standard Error: 3_896 - .saturating_add(Weight::from_parts(32_615, 0).saturating_mul(p.into())) + // Minimum execution time: 40_256_000 picoseconds. + Weight::from_parts(40_373_648, 5698) + // Standard Error: 3_978 + .saturating_add(Weight::from_parts(166_936, 0).saturating_mul(a.into())) + // Standard Error: 4_110 + .saturating_add(Weight::from_parts(54_329, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -266,14 +274,16 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. - fn remove_announcement(a: u32, _p: u32, ) -> Weight { + fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 25_432_000 picoseconds. - Weight::from_parts(26_301_674, 5698) - // Standard Error: 1_413 - .saturating_add(Weight::from_parts(167_176, 0).saturating_mul(a.into())) + // Minimum execution time: 25_040_000 picoseconds. 
+ Weight::from_parts(25_112_188, 5698) + // Standard Error: 2_143 + .saturating_add(Weight::from_parts(189_027, 0).saturating_mul(a.into())) + // Standard Error: 2_214 + .saturating_add(Weight::from_parts(26_683, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -287,12 +297,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `403 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 25_280_000 picoseconds. - Weight::from_parts(26_099_549, 5698) - // Standard Error: 1_458 - .saturating_add(Weight::from_parts(168_724, 0).saturating_mul(a.into())) - // Standard Error: 1_507 - .saturating_add(Weight::from_parts(2_212, 0).saturating_mul(p.into())) + // Minimum execution time: 24_884_000 picoseconds. + Weight::from_parts(25_359_291, 5698) + // Standard Error: 2_019 + .saturating_add(Weight::from_parts(181_470, 0).saturating_mul(a.into())) + // Standard Error: 2_086 + .saturating_add(Weight::from_parts(17_725, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -308,12 +318,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `420 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 35_889_000 picoseconds. - Weight::from_parts(37_535_424, 5698) - // Standard Error: 3_899 - .saturating_add(Weight::from_parts(138_757, 0).saturating_mul(a.into())) - // Standard Error: 4_028 - .saturating_add(Weight::from_parts(46_196, 0).saturating_mul(p.into())) + // Minimum execution time: 35_039_000 picoseconds. 
+ Weight::from_parts(36_727_868, 5698) + // Standard Error: 4_463 + .saturating_add(Weight::from_parts(167_060, 0).saturating_mul(a.into())) + // Standard Error: 4_611 + .saturating_add(Weight::from_parts(59_836, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -324,10 +334,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 26_876_000 picoseconds. - Weight::from_parts(27_356_694, 4706) - // Standard Error: 1_437 - .saturating_add(Weight::from_parts(68_994, 0).saturating_mul(p.into())) + // Minimum execution time: 25_697_000 picoseconds. + Weight::from_parts(26_611_090, 4706) + // Standard Error: 2_306 + .saturating_add(Weight::from_parts(85_165, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -338,10 +348,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 26_655_000 picoseconds. - Weight::from_parts(27_726_692, 4706) - // Standard Error: 1_980 - .saturating_add(Weight::from_parts(55_932, 0).saturating_mul(p.into())) + // Minimum execution time: 25_638_000 picoseconds. + Weight::from_parts(26_904_510, 4706) + // Standard Error: 2_669 + .saturating_add(Weight::from_parts(61_668, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -352,22 +362,24 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 23_716_000 picoseconds. - Weight::from_parts(24_660_737, 4706) - // Standard Error: 1_400 - .saturating_add(Weight::from_parts(31_679, 0).saturating_mul(p.into())) + // Minimum execution time: 22_737_000 picoseconds. 
+ Weight::from_parts(23_618_441, 4706) + // Standard Error: 1_729 + .saturating_add(Weight::from_parts(44_009, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Proxy Proxies (r:1 w:1) /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) /// The range of component `p` is `[1, 31]`. - fn create_pure(_p: u32, ) -> Weight { + fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `173` // Estimated: `4706` - // Minimum execution time: 28_233_000 picoseconds. - Weight::from_parts(29_602_422, 4706) + // Minimum execution time: 27_364_000 picoseconds. + Weight::from_parts(28_632_271, 4706) + // Standard Error: 1_613 + .saturating_add(Weight::from_parts(2_453, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -378,10 +390,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `198 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 24_759_000 picoseconds. - Weight::from_parts(25_533_053, 4706) - // Standard Error: 1_254 - .saturating_add(Weight::from_parts(36_331, 0).saturating_mul(p.into())) + // Minimum execution time: 23_552_000 picoseconds. 
+ Weight::from_parts(24_874_553, 4706) + // Standard Error: 1_919 + .saturating_add(Weight::from_parts(38_799, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/ranked-collective/Cargo.toml b/frame/ranked-collective/Cargo.toml index e19aaa4439716..0a20b4eb97ca1 100644 --- a/frame/ranked-collective/Cargo.toml +++ b/frame/ranked-collective/Cargo.toml @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.16", default-features = false } scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../primitives/arithmetic" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", 
default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [features] default = ["std"] @@ -46,4 +46,8 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index 6296403d2a1ce..fe1308cd034f4 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -53,8 +53,8 @@ use sp_std::{marker::PhantomData, prelude::*}; use frame_support::{ codec::{Decode, Encode, MaxEncodedLen}, dispatch::{DispatchError, DispatchResultWithPostInfo, PostDispatchInfo}, - ensure, - traits::{EnsureOrigin, PollStatus, Polling, RankedMembers, VoteTally}, + ensure, impl_ensure_origin_with_arg_ignoring_arg, + traits::{EnsureOrigin, EnsureOriginWithArg, PollStatus, Polling, RankedMembers, VoteTally}, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; @@ -263,7 +263,7 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin Result { - let who = frame_system::EnsureSigned::try_origin(o)?; + let who = as EnsureOrigin<_>>::try_origin(o)?; match Members::::get(&who) { Some(MemberRecord { rank, .. }) if rank >= MIN_RANK => Ok(rank), _ => Err(frame_system::RawOrigin::Signed(who).into()), @@ -272,7 +272,36 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin Result { - EnsureRankedMember::::try_successful_origin() + as EnsureOrigin<_>>::try_successful_origin() + } +} + +impl_ensure_origin_with_arg_ignoring_arg! { + impl<{ T: Config, I: 'static, const MIN_RANK: u16, A }> + EnsureOriginWithArg for EnsureRanked + {} +} + +/// Guard to ensure that the given origin is a member of the collective. The rank of the member is +/// the `Success` value. 
+pub struct EnsureOfRank(PhantomData<(T, I)>); +impl, I: 'static> EnsureOriginWithArg for EnsureOfRank { + type Success = (T::AccountId, Rank); + + fn try_origin(o: T::RuntimeOrigin, min_rank: &Rank) -> Result { + let who = as EnsureOrigin<_>>::try_origin(o)?; + match Members::::get(&who) { + Some(MemberRecord { rank, .. }) if rank >= *min_rank => Ok((who, rank)), + _ => Err(frame_system::RawOrigin::Signed(who).into()), + } + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin(min_rank: &Rank) -> Result { + let who = frame_benchmarking::account::("successful_origin", 0, 0); + crate::Pallet::::do_add_member_to_rank(who.clone(), *min_rank) + .expect("Could not add members for benchmarks"); + Ok(frame_system::RawOrigin::Signed(who).into()) } } @@ -285,7 +314,7 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin Result { - let who = frame_system::EnsureSigned::try_origin(o)?; + let who = as EnsureOrigin<_>>::try_origin(o)?; match Members::::get(&who) { Some(MemberRecord { rank, .. }) if rank >= MIN_RANK => Ok(who), _ => Err(frame_system::RawOrigin::Signed(who).into()), @@ -294,10 +323,16 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin Result { - EnsureRankedMember::::try_successful_origin() + as EnsureOrigin<_>>::try_successful_origin() } } +impl_ensure_origin_with_arg_ignoring_arg! { + impl<{ T: Config, I: 'static, const MIN_RANK: u16, A }> + EnsureOriginWithArg for EnsureMember + {} +} + /// Guard to ensure that the given origin is a member of the collective. The pair of both the /// account ID and the rank of the member is the `Success` value. pub struct EnsureRankedMember(PhantomData<(T, I)>); @@ -307,7 +342,7 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin Result { - let who = frame_system::EnsureSigned::try_origin(o)?; + let who = as EnsureOrigin<_>>::try_origin(o)?; match Members::::get(&who) { Some(MemberRecord { rank, .. 
}) if rank >= MIN_RANK => Ok((who, rank)), _ => Err(frame_system::RawOrigin::Signed(who).into()), @@ -323,6 +358,12 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin, I: 'static, const MIN_RANK: u16, A }> + EnsureOriginWithArg for EnsureRankedMember + {} +} + #[frame_support::pallet] pub mod pallet { use super::*; @@ -350,7 +391,7 @@ pub mod pallet { type DemoteOrigin: EnsureOrigin; /// The polling system used for our voting. - type Polls: Polling, Votes = Votes, Moment = Self::BlockNumber>; + type Polls: Polling, Votes = Votes, Moment = BlockNumberFor>; /// Convert the tally class into the minimum rank required to vote on the poll. If /// `Polls::Class` is the same type as `Rank`, then `Identity` can be used here to mean @@ -708,6 +749,15 @@ pub mod pallet { } Ok(()) } + + /// Determine the rank of the account behind the `Signed` origin `o`, `None` if the account + /// is unknown to this collective or `o` is not `Signed`. + pub fn as_rank( + o: &::PalletsOrigin, + ) -> Option { + use frame_support::traits::CallerTrait; + o.as_signed().and_then(Self::rank_of) + } } impl, I: 'static> RankedMembers for Pallet { diff --git a/frame/ranked-collective/src/tests.rs b/frame/ranked-collective/src/tests.rs index 04519bc0f8e22..ba8c5a0f937ba 100644 --- a/frame/ranked-collective/src/tests.rs +++ b/frame/ranked-collective/src/tests.rs @@ -27,24 +27,20 @@ use frame_support::{ }; use sp_core::{Get, H256}; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup, ReduceBy}, + BuildStorage, }; use super::*; use crate as pallet_ranked_collective; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; type Class = Rank; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, 
Event}, Club: pallet_ranked_collective::{Pallet, Call, Storage, Event}, } ); @@ -55,14 +51,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -199,7 +194,7 @@ impl Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext @@ -464,34 +459,52 @@ fn ensure_ranked_works() { type Rank2 = EnsureRanked; type Rank3 = EnsureRanked; type Rank4 = EnsureRanked; - assert_eq!(Rank1::try_origin(RuntimeOrigin::signed(1)).unwrap(), 1); - assert_eq!(Rank1::try_origin(RuntimeOrigin::signed(2)).unwrap(), 2); - assert_eq!(Rank1::try_origin(RuntimeOrigin::signed(3)).unwrap(), 3); + assert_eq!(>::try_origin(RuntimeOrigin::signed(1)).unwrap(), 1); + assert_eq!(>::try_origin(RuntimeOrigin::signed(2)).unwrap(), 2); + assert_eq!(>::try_origin(RuntimeOrigin::signed(3)).unwrap(), 3); assert_eq!( - Rank2::try_origin(RuntimeOrigin::signed(1)).unwrap_err().as_signed().unwrap(), + >::try_origin(RuntimeOrigin::signed(1)) + .unwrap_err() + .into_signer() + .unwrap(), 1 ); - assert_eq!(Rank2::try_origin(RuntimeOrigin::signed(2)).unwrap(), 2); - assert_eq!(Rank2::try_origin(RuntimeOrigin::signed(3)).unwrap(), 3); + assert_eq!(>::try_origin(RuntimeOrigin::signed(2)).unwrap(), 2); + assert_eq!(>::try_origin(RuntimeOrigin::signed(3)).unwrap(), 3); assert_eq!( - Rank3::try_origin(RuntimeOrigin::signed(1)).unwrap_err().as_signed().unwrap(), + 
>::try_origin(RuntimeOrigin::signed(1)) + .unwrap_err() + .into_signer() + .unwrap(), 1 ); assert_eq!( - Rank3::try_origin(RuntimeOrigin::signed(2)).unwrap_err().as_signed().unwrap(), + >::try_origin(RuntimeOrigin::signed(2)) + .unwrap_err() + .into_signer() + .unwrap(), 2 ); - assert_eq!(Rank3::try_origin(RuntimeOrigin::signed(3)).unwrap(), 3); + assert_eq!(>::try_origin(RuntimeOrigin::signed(3)).unwrap(), 3); assert_eq!( - Rank4::try_origin(RuntimeOrigin::signed(1)).unwrap_err().as_signed().unwrap(), + >::try_origin(RuntimeOrigin::signed(1)) + .unwrap_err() + .into_signer() + .unwrap(), 1 ); assert_eq!( - Rank4::try_origin(RuntimeOrigin::signed(2)).unwrap_err().as_signed().unwrap(), + >::try_origin(RuntimeOrigin::signed(2)) + .unwrap_err() + .into_signer() + .unwrap(), 2 ); assert_eq!( - Rank4::try_origin(RuntimeOrigin::signed(3)).unwrap_err().as_signed().unwrap(), + >::try_origin(RuntimeOrigin::signed(3)) + .unwrap_err() + .into_signer() + .unwrap(), 3 ); }); diff --git a/frame/ranked-collective/src/weights.rs b/frame/ranked-collective/src/weights.rs index 754fcf70aaafc..9f1a0a8180446 100644 --- a/frame/ranked-collective/src/weights.rs +++ b/frame/ranked-collective/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_ranked_collective //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_ranked_collective +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_ranked_collective. pub trait WeightInfo { @@ -69,10 +73,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) fn add_member() -> Weight { // Proof Size summary in bytes: - // Measured: `109` + // Measured: `142` // Estimated: `3507` - // Minimum execution time: 18_480_000 picoseconds. - Weight::from_parts(18_769_000, 3507) + // Minimum execution time: 17_245_000 picoseconds. + Weight::from_parts(17_930_000, 3507) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -87,12 +91,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 10]`. fn remove_member(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `583 + r * (281 ±0)` + // Measured: `616 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 30_087_000 picoseconds. - Weight::from_parts(33_646_239, 3519) - // Standard Error: 22_498 - .saturating_add(Weight::from_parts(12_524_289, 0).saturating_mul(r.into())) + // Minimum execution time: 29_534_000 picoseconds. 
+ Weight::from_parts(32_847_495, 3519) + // Standard Error: 24_211 + .saturating_add(Weight::from_parts(13_949_639, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -110,12 +114,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 10]`. fn promote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `281 + r * (17 ±0)` + // Measured: `314 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 20_974_000 picoseconds. - Weight::from_parts(21_582_135, 3507) - // Standard Error: 4_965 - .saturating_add(Weight::from_parts(294_566, 0).saturating_mul(r.into())) + // Minimum execution time: 20_333_000 picoseconds. + Weight::from_parts(21_592_224, 3507) + // Standard Error: 6_423 + .saturating_add(Weight::from_parts(321_314, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -130,12 +134,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 10]`. fn demote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `599 + r * (72 ±0)` + // Measured: `632 + r * (72 ±0)` // Estimated: `3519` - // Minimum execution time: 29_621_000 picoseconds. - Weight::from_parts(32_118_301, 3519) - // Standard Error: 27_596 - .saturating_add(Weight::from_parts(647_979, 0).saturating_mul(r.into())) + // Minimum execution time: 29_446_000 picoseconds. 
+ Weight::from_parts(32_447_715, 3519) + // Standard Error: 28_791 + .saturating_add(Weight::from_parts(822_890, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -149,10 +153,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn vote() -> Weight { // Proof Size summary in bytes: - // Measured: `595` + // Measured: `628` // Estimated: `219984` - // Minimum execution time: 46_360_000 picoseconds. - Weight::from_parts(46_793_000, 219984) + // Minimum execution time: 45_474_000 picoseconds. + Weight::from_parts(47_228_000, 219984) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -165,12 +169,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 100]`. fn cleanup_poll(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `429 + n * (50 ±0)` + // Measured: `462 + n * (50 ±0)` // Estimated: `3795 + n * (2540 ±0)` - // Minimum execution time: 14_869_000 picoseconds. - Weight::from_parts(18_545_013, 3795) - // Standard Error: 1_376 - .saturating_add(Weight::from_parts(1_005_397, 0).saturating_mul(n.into())) + // Minimum execution time: 13_903_000 picoseconds. 
+ Weight::from_parts(18_209_102, 3795) + // Standard Error: 2_556 + .saturating_add(Weight::from_parts(1_237_454, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -190,10 +194,10 @@ impl WeightInfo for () { /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) fn add_member() -> Weight { // Proof Size summary in bytes: - // Measured: `109` + // Measured: `142` // Estimated: `3507` - // Minimum execution time: 18_480_000 picoseconds. - Weight::from_parts(18_769_000, 3507) + // Minimum execution time: 17_245_000 picoseconds. + Weight::from_parts(17_930_000, 3507) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -208,12 +212,12 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 10]`. fn remove_member(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `583 + r * (281 ±0)` + // Measured: `616 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 30_087_000 picoseconds. - Weight::from_parts(33_646_239, 3519) - // Standard Error: 22_498 - .saturating_add(Weight::from_parts(12_524_289, 0).saturating_mul(r.into())) + // Minimum execution time: 29_534_000 picoseconds. + Weight::from_parts(32_847_495, 3519) + // Standard Error: 24_211 + .saturating_add(Weight::from_parts(13_949_639, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -231,12 +235,12 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 10]`. 
fn promote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `281 + r * (17 ±0)` + // Measured: `314 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 20_974_000 picoseconds. - Weight::from_parts(21_582_135, 3507) - // Standard Error: 4_965 - .saturating_add(Weight::from_parts(294_566, 0).saturating_mul(r.into())) + // Minimum execution time: 20_333_000 picoseconds. + Weight::from_parts(21_592_224, 3507) + // Standard Error: 6_423 + .saturating_add(Weight::from_parts(321_314, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -251,12 +255,12 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 10]`. fn demote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `599 + r * (72 ±0)` + // Measured: `632 + r * (72 ±0)` // Estimated: `3519` - // Minimum execution time: 29_621_000 picoseconds. - Weight::from_parts(32_118_301, 3519) - // Standard Error: 27_596 - .saturating_add(Weight::from_parts(647_979, 0).saturating_mul(r.into())) + // Minimum execution time: 29_446_000 picoseconds. + Weight::from_parts(32_447_715, 3519) + // Standard Error: 28_791 + .saturating_add(Weight::from_parts(822_890, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -270,10 +274,10 @@ impl WeightInfo for () { /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn vote() -> Weight { // Proof Size summary in bytes: - // Measured: `595` + // Measured: `628` // Estimated: `219984` - // Minimum execution time: 46_360_000 picoseconds. - Weight::from_parts(46_793_000, 219984) + // Minimum execution time: 45_474_000 picoseconds. 
+ Weight::from_parts(47_228_000, 219984) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -286,12 +290,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 100]`. fn cleanup_poll(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `429 + n * (50 ±0)` + // Measured: `462 + n * (50 ±0)` // Estimated: `3795 + n * (2540 ±0)` - // Minimum execution time: 14_869_000 picoseconds. - Weight::from_parts(18_545_013, 3795) - // Standard Error: 1_376 - .saturating_add(Weight::from_parts(1_005_397, 0).saturating_mul(n.into())) + // Minimum execution time: 13_903_000 picoseconds. + Weight::from_parts(18_209_102, 3795) + // Standard Error: 2_556 + .saturating_add(Weight::from_parts(1_237_454, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index b2e3236e4dfc5..14d32866e0101 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -13,18 +13,18 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = 
"7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } [features] default = ["std"] @@ -33,6 +33,8 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "pallet-balances/runtime-benchmarks" ] std = [ "codec/std", @@ -43,5 +45,12 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "sp-core/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index d66b5725fd4f7..9c01d25d4f76b 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -48,34 +48,30 @@ //! ### Recovery Life Cycle //! //! The intended life cycle of a successful recovery takes the following steps: -//! 1. The account owner calls `create_recovery` to set up a recovery configuration -//! for their account. -//! 2. At some later time, the account owner loses access to their account and wants -//! to recover it. Likely, they will need to create a new account and fund it with -//! enough balance to support the transaction fees and the deposit for the -//! recovery process. -//! 3. Using this new account, they call `initiate_recovery`. -//! 4. 
Then the account owner would contact their configured friends to vouch for -//! the recovery attempt. The account owner would provide their old account id -//! and the new account id, and friends would call `vouch_recovery` with those -//! parameters. -//! 5. Once a threshold number of friends have vouched for the recovery attempt, -//! the account owner needs to wait until the delay period has passed, starting -//! when they initiated the recovery process. -//! 6. Now the account owner is able to call `claim_recovery`, which subsequently -//! allows them to call `as_recovered` and directly make calls on-behalf-of the lost +//! 1. The account owner calls `create_recovery` to set up a recovery configuration for their //! account. -//! 7. Using the now recovered account, the account owner can call `close_recovery` -//! on the recovery process they opened, reclaiming the recovery deposit they -//! placed. +//! 2. At some later time, the account owner loses access to their account and wants to recover it. +//! Likely, they will need to create a new account and fund it with enough balance to support the +//! transaction fees and the deposit for the recovery process. +//! 3. Using this new account, they call `initiate_recovery`. +//! 4. Then the account owner would contact their configured friends to vouch for the recovery +//! attempt. The account owner would provide their old account id and the new account id, and +//! friends would call `vouch_recovery` with those parameters. +//! 5. Once a threshold number of friends have vouched for the recovery attempt, the account owner +//! needs to wait until the delay period has passed, starting when they initiated the recovery +//! process. +//! 6. Now the account owner is able to call `claim_recovery`, which subsequently allows them to +//! call `as_recovered` and directly make calls on-behalf-of the lost account. +//! 7. Using the now recovered account, the account owner can call `close_recovery` on the recovery +//! 
process they opened, reclaiming the recovery deposit they placed. //! 8. Then the account owner should then call `remove_recovery` to remove the recovery -//! configuration on the recovered account and reclaim the recovery configuration -//! deposit they placed. -//! 9. Using `as_recovered`, the account owner is able to call any other pallets -//! to clean up their state and reclaim any reserved or locked funds. They -//! can then transfer all funds from the recovered account to the new account. -//! 10. When the recovered account becomes reaped (i.e. its free and reserved -//! balance drops to zero), the final recovery link is removed. +//! configuration on the recovered account and reclaim the recovery configuration deposit they +//! placed. +//! 9. Using `as_recovered`, the account owner is able to call any other pallets to clean up their +//! state and reclaim any reserved or locked funds. They can then transfer all funds from the +//! recovered account to the new account. +//! 10. When the recovered account becomes reaped (i.e. its free and reserved balance drops to +//! zero), the final recovery link is removed. //! //! ### Malicious Recovery Attempts //! @@ -338,7 +334,7 @@ pub mod pallet { _, Twox64Concat, T::AccountId, - RecoveryConfig, FriendsOf>, + RecoveryConfig, BalanceOf, FriendsOf>, >; /// Active recovery attempts. @@ -353,7 +349,7 @@ pub mod pallet { T::AccountId, Twox64Concat, T::AccountId, - ActiveRecovery, FriendsOf>, + ActiveRecovery, BalanceOf, FriendsOf>, >; /// The list of allowed proxy accounts. 
@@ -444,7 +440,7 @@ pub mod pallet { origin: OriginFor, friends: Vec, threshold: u16, - delay_period: T::BlockNumber, + delay_period: BlockNumberFor, ) -> DispatchResult { let who = ensure_signed(origin)?; // Check account is not already set up for recovery diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 5c190e2a241a5..2f2bd866a7198 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -26,20 +26,16 @@ use frame_support::{ }; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Recovery: recovery::{Pallet, Call, Storage, Event}, } @@ -52,13 +48,12 @@ impl frame_system::Config for Test { type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -88,7 +83,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -115,7 +110,7 @@ pub type BalancesCall = pallet_balances::Call; pub type RecoveryCall = super::Call; pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = 
frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)], } diff --git a/frame/recovery/src/weights.rs b/frame/recovery/src/weights.rs index 97d4c8b87aa53..84b19ae694eec 100644 --- a/frame/recovery/src/weights.rs +++ b/frame/recovery/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_recovery //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_recovery +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_recovery. pub trait WeightInfo { @@ -68,8 +72,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `281` // Estimated: `3545` - // Minimum execution time: 10_405_000 picoseconds. - Weight::from_parts(10_807_000, 3545) + // Minimum execution time: 9_360_000 picoseconds. 
+ Weight::from_parts(9_773_000, 3545) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Recovery Proxy (r:0 w:1) @@ -78,8 +82,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_198_000 picoseconds. - Weight::from_parts(11_459_000, 0) + // Minimum execution time: 9_146_000 picoseconds. + Weight::from_parts(9_507_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Recovery Recoverable (r:1 w:1) @@ -89,10 +93,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `175` // Estimated: `3816` - // Minimum execution time: 28_009_000 picoseconds. - Weight::from_parts(28_755_652, 3816) - // Standard Error: 3_536 - .saturating_add(Weight::from_parts(78_348, 0).saturating_mul(n.into())) + // Minimum execution time: 26_472_000 picoseconds. + Weight::from_parts(27_917_651, 3816) + // Standard Error: 7_129 + .saturating_add(Weight::from_parts(59_239, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -104,8 +108,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `272` // Estimated: `3854` - // Minimum execution time: 31_233_000 picoseconds. - Weight::from_parts(31_508_000, 3854) + // Minimum execution time: 29_618_000 picoseconds. + Weight::from_parts(30_192_000, 3854) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -118,10 +122,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `360 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 20_542_000 picoseconds. - Weight::from_parts(21_224_065, 3854) - // Standard Error: 3_018 - .saturating_add(Weight::from_parts(171_994, 0).saturating_mul(n.into())) + // Minimum execution time: 19_464_000 picoseconds. 
+ Weight::from_parts(20_642_522, 3854) + // Standard Error: 5_974 + .saturating_add(Weight::from_parts(142_308, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -136,10 +140,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `392 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 25_141_000 picoseconds. - Weight::from_parts(25_880_238, 3854) - // Standard Error: 3_156 - .saturating_add(Weight::from_parts(54_405, 0).saturating_mul(n.into())) + // Minimum execution time: 23_656_000 picoseconds. + Weight::from_parts(24_903_269, 3854) + // Standard Error: 5_771 + .saturating_add(Weight::from_parts(117_343, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -152,10 +156,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `513 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 35_314_000 picoseconds. - Weight::from_parts(36_380_338, 3854) - // Standard Error: 7_396 - .saturating_add(Weight::from_parts(3_861, 0).saturating_mul(n.into())) + // Minimum execution time: 34_866_000 picoseconds. + Weight::from_parts(36_368_748, 3854) + // Standard Error: 6_600 + .saturating_add(Weight::from_parts(118_610, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -168,10 +172,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `270 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 33_453_000 picoseconds. - Weight::from_parts(34_078_626, 3854) - // Standard Error: 2_563 - .saturating_add(Weight::from_parts(78_179, 0).saturating_mul(n.into())) + // Minimum execution time: 31_405_000 picoseconds. 
+ Weight::from_parts(32_552_838, 3854) + // Standard Error: 8_043 + .saturating_add(Weight::from_parts(171_605, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -181,8 +185,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `281` // Estimated: `3545` - // Minimum execution time: 12_196_000 picoseconds. - Weight::from_parts(12_580_000, 3545) + // Minimum execution time: 11_530_000 picoseconds. + Weight::from_parts(11_851_000, 3545) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -196,8 +200,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `281` // Estimated: `3545` - // Minimum execution time: 10_405_000 picoseconds. - Weight::from_parts(10_807_000, 3545) + // Minimum execution time: 9_360_000 picoseconds. + Weight::from_parts(9_773_000, 3545) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Recovery Proxy (r:0 w:1) @@ -206,8 +210,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_198_000 picoseconds. - Weight::from_parts(11_459_000, 0) + // Minimum execution time: 9_146_000 picoseconds. + Weight::from_parts(9_507_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Recovery Recoverable (r:1 w:1) @@ -217,10 +221,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `175` // Estimated: `3816` - // Minimum execution time: 28_009_000 picoseconds. - Weight::from_parts(28_755_652, 3816) - // Standard Error: 3_536 - .saturating_add(Weight::from_parts(78_348, 0).saturating_mul(n.into())) + // Minimum execution time: 26_472_000 picoseconds. 
+ Weight::from_parts(27_917_651, 3816) + // Standard Error: 7_129 + .saturating_add(Weight::from_parts(59_239, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -232,8 +236,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `272` // Estimated: `3854` - // Minimum execution time: 31_233_000 picoseconds. - Weight::from_parts(31_508_000, 3854) + // Minimum execution time: 29_618_000 picoseconds. + Weight::from_parts(30_192_000, 3854) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -246,10 +250,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `360 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 20_542_000 picoseconds. - Weight::from_parts(21_224_065, 3854) - // Standard Error: 3_018 - .saturating_add(Weight::from_parts(171_994, 0).saturating_mul(n.into())) + // Minimum execution time: 19_464_000 picoseconds. + Weight::from_parts(20_642_522, 3854) + // Standard Error: 5_974 + .saturating_add(Weight::from_parts(142_308, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -264,10 +268,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `392 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 25_141_000 picoseconds. - Weight::from_parts(25_880_238, 3854) - // Standard Error: 3_156 - .saturating_add(Weight::from_parts(54_405, 0).saturating_mul(n.into())) + // Minimum execution time: 23_656_000 picoseconds. 
+ Weight::from_parts(24_903_269, 3854) + // Standard Error: 5_771 + .saturating_add(Weight::from_parts(117_343, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -280,10 +284,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `513 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 35_314_000 picoseconds. - Weight::from_parts(36_380_338, 3854) - // Standard Error: 7_396 - .saturating_add(Weight::from_parts(3_861, 0).saturating_mul(n.into())) + // Minimum execution time: 34_866_000 picoseconds. + Weight::from_parts(36_368_748, 3854) + // Standard Error: 6_600 + .saturating_add(Weight::from_parts(118_610, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -296,10 +300,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `270 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 33_453_000 picoseconds. - Weight::from_parts(34_078_626, 3854) - // Standard Error: 2_563 - .saturating_add(Weight::from_parts(78_179, 0).saturating_mul(n.into())) + // Minimum execution time: 31_405_000 picoseconds. + Weight::from_parts(32_552_838, 3854) + // Standard Error: 8_043 + .saturating_add(Weight::from_parts(171_605, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -309,8 +313,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `281` // Estimated: `3545` - // Minimum execution time: 12_196_000 picoseconds. - Weight::from_parts(12_580_000, 3545) + // Minimum execution time: 11_530_000 picoseconds. 
+ Weight::from_parts(11_851_000, 3545) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/referenda/Cargo.toml b/frame/referenda/Cargo.toml index 5a092c63beec0..a52d987806421 100644 --- a/frame/referenda/Cargo.toml +++ b/frame/referenda/Cargo.toml @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] assert_matches = { version = "1.5", optional = true } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", features = ["derive"], optional = true } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../primitives/arithmetic" } +serde = { version = "1.0.163", features = ["derive"], optional = true } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../../primitives/arithmetic" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } log = { version = "0.4.17", default-features = false } 
[dev-dependencies] @@ -33,7 +33,7 @@ assert_matches = { version = "1.5" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-preimage = { version = "4.0.0-dev", path = "../preimage" } pallet-scheduler = { version = "4.0.0-dev", path = "../scheduler" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } [features] default = ["std"] @@ -49,6 +49,10 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "pallet-preimage/std", + "pallet-scheduler/std", + "sp-core/std" ] runtime-benchmarks = [ "assert_matches", @@ -56,5 +60,16 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-preimage/runtime-benchmarks", + "pallet-scheduler/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-preimage/try-runtime", + "pallet-scheduler/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/referenda/src/benchmarking.rs b/frame/referenda/src/benchmarking.rs index 288c65feae567..78d14bd99d2ee 100644 --- a/frame/referenda/src/benchmarking.rs +++ b/frame/referenda/src/benchmarking.rs @@ -26,7 +26,7 @@ use frame_benchmarking::v1::{ use frame_support::{ assert_ok, dispatch::UnfilteredDispatchable, - traits::{Bounded, Currency, EnsureOrigin}, + traits::{Bounded, Currency, EnsureOrigin, EnsureOriginWithArg}, }; use frame_system::RawOrigin; use sp_runtime::traits::Bounded as ArithBounded; @@ -172,7 +172,9 @@ fn skip_timeout_period, I: 'static>(index: ReferendumIndex) { frame_system::Pallet::::set_block_number(timeout_period_over); } -fn alarm_time, I: 'static>(index: ReferendumIndex) -> T::BlockNumber { +fn alarm_time, I: 'static>( + index: ReferendumIndex, 
+) -> frame_system::pallet_prelude::BlockNumberFor { let status = Referenda::::ensure_ongoing(index).unwrap(); status.alarm.unwrap().0 } @@ -196,7 +198,7 @@ fn is_not_confirming, I: 'static>(index: ReferendumIndex) -> bool { benchmarks_instance_pallet! { submit { let origin = - T::SubmitOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()).map_err(|_| BenchmarkError::Weightless)?; if let Ok(caller) = frame_system::ensure_signed(origin.clone()) { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); whitelist_account!(caller); @@ -213,7 +215,7 @@ benchmarks_instance_pallet! { place_decision_deposit_preparing { let origin = - T::SubmitOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()).map_err(|_| BenchmarkError::Weightless)?; let index = create_referendum::(origin.clone()); }: place_decision_deposit(origin, index) verify { @@ -222,7 +224,7 @@ benchmarks_instance_pallet! { place_decision_deposit_queued { let origin = - T::SubmitOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()).map_err(|_| BenchmarkError::Weightless)?; let index = create_referendum::(origin.clone()); fill_queue::(origin.clone(), index, 1, 90); }: place_decision_deposit(origin, index) @@ -234,7 +236,7 @@ benchmarks_instance_pallet! { place_decision_deposit_not_queued { let origin = - T::SubmitOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()).map_err(|_| BenchmarkError::Weightless)?; let index = create_referendum::(origin.clone()); fill_queue::(origin.clone(), index, 0, 90); let track = Referenda::::ensure_ongoing(index).unwrap().track; @@ -248,7 +250,7 @@ benchmarks_instance_pallet! 
{ place_decision_deposit_passing { let origin = - T::SubmitOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()).map_err(|_| BenchmarkError::Weightless)?; let index = create_referendum::(origin.clone()); skip_prepare_period::(index); make_passing::(index); @@ -259,7 +261,7 @@ benchmarks_instance_pallet! { place_decision_deposit_failing { let origin = - T::SubmitOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()).map_err(|_| BenchmarkError::Weightless)?; let index = create_referendum::(origin.clone()); skip_prepare_period::(index); }: place_decision_deposit(origin, index) @@ -269,7 +271,7 @@ benchmarks_instance_pallet! { refund_decision_deposit { let origin = - T::SubmitOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()).map_err(|_| BenchmarkError::Weightless)?; let index = create_referendum::(origin.clone()); place_deposit::(index); assert_ok!(Referenda::::cancel( @@ -284,7 +286,7 @@ benchmarks_instance_pallet! { refund_submission_deposit { let origin = - T::SubmitOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()).map_err(|_| BenchmarkError::Weightless)?; let index = create_referendum::(origin.clone()); let caller = frame_system::ensure_signed(origin.clone()).unwrap(); let balance = T::Currency::free_balance(&caller); @@ -303,7 +305,7 @@ benchmarks_instance_pallet! { } cancel { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); place_deposit::(index); @@ -315,7 +317,7 @@ benchmarks_instance_pallet! 
{ } kill { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); place_deposit::(index); @@ -327,7 +329,7 @@ benchmarks_instance_pallet! { } one_fewer_deciding_queue_empty { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); place_deposit::(index); @@ -346,7 +348,7 @@ benchmarks_instance_pallet! { } one_fewer_deciding_failing { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin.clone()); // No spaces free in the queue. @@ -371,7 +373,7 @@ benchmarks_instance_pallet! { } one_fewer_deciding_passing { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin.clone()); // No spaces free in the queue. @@ -396,7 +398,7 @@ benchmarks_instance_pallet! { } nudge_referendum_requeued_insertion { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); // First create our referendum and place the deposit. It will be failing. let index = create_referendum::(origin.clone()); @@ -419,7 +421,7 @@ benchmarks_instance_pallet! 
{ } nudge_referendum_requeued_slide { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); // First create our referendum and place the deposit. It will be failing. let index = create_referendum::(origin.clone()); @@ -447,7 +449,7 @@ benchmarks_instance_pallet! { // free and this failing. It would result in `QUEUE_SIZE - 1` items being shifted for the // insertion at the beginning. - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); // First create our referendum and place the deposit. It will be failing. let index = create_referendum::(origin.clone()); @@ -466,7 +468,7 @@ benchmarks_instance_pallet! { } nudge_referendum_not_queued { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); // First create our referendum and place the deposit. It will be failing. let index = create_referendum::(origin.clone()); @@ -485,7 +487,7 @@ benchmarks_instance_pallet! { } nudge_referendum_no_deposit { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); skip_prepare_period::(index); @@ -496,7 +498,7 @@ benchmarks_instance_pallet! 
{ } nudge_referendum_preparing { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); place_deposit::(index); @@ -507,7 +509,7 @@ benchmarks_instance_pallet! { } nudge_referendum_timed_out { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); skip_timeout_period::(index); @@ -518,7 +520,7 @@ benchmarks_instance_pallet! { } nudge_referendum_begin_deciding_failing { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); place_deposit::(index); @@ -529,7 +531,7 @@ benchmarks_instance_pallet! { } nudge_referendum_begin_deciding_passing { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); place_deposit::(index); @@ -541,7 +543,7 @@ benchmarks_instance_pallet! { } nudge_referendum_begin_confirming { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); place_deposit::(index); @@ -555,7 +557,7 @@ benchmarks_instance_pallet! 
{ } nudge_referendum_end_confirming { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); place_deposit::(index); @@ -570,7 +572,7 @@ benchmarks_instance_pallet! { } nudge_referendum_continue_not_confirming { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); place_deposit::(index); @@ -586,7 +588,7 @@ benchmarks_instance_pallet! { } nudge_referendum_continue_confirming { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); place_deposit::(index); @@ -601,7 +603,7 @@ benchmarks_instance_pallet! { } nudge_referendum_approved { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); place_deposit::(index); @@ -616,7 +618,7 @@ benchmarks_instance_pallet! { } nudge_referendum_rejected { - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin); place_deposit::(index); @@ -632,7 +634,7 @@ benchmarks_instance_pallet! 
{ set_some_metadata { use sp_std::borrow::Cow; - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin.clone()); let hash = T::Preimages::note(Cow::from(vec![5, 6])).unwrap(); @@ -643,7 +645,7 @@ benchmarks_instance_pallet! { clear_metadata { use sp_std::borrow::Cow; - let origin = T::SubmitOrigin::try_successful_origin() + let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin.clone()); let hash = T::Preimages::note(Cow::from(vec![6, 7, 8])).unwrap(); @@ -657,7 +659,7 @@ benchmarks_instance_pallet! { impl_benchmark_test_suite!( Referenda, - crate::mock::new_test_ext(), + crate::mock::ExtBuilder::default().build(), crate::mock::Test ); } diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index dd352d0af40cb..d4dbbf8a3c998 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -66,6 +66,7 @@ use codec::{Codec, Encode}; use frame_support::{ + dispatch::DispatchResult, ensure, traits::{ schedule::{ @@ -77,6 +78,7 @@ use frame_support::{ }, BoundedVec, }; +use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; use sp_runtime::{ traits::{AtLeast32BitUnsigned, Bounded, Dispatchable, One, Saturating, Zero}, @@ -138,7 +140,7 @@ const ASSEMBLY_ID: LockIdentifier = *b"assembly"; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; + use frame_support::{pallet_prelude::*, traits::EnsureOriginWithArg}; use frame_system::pallet_prelude::*; /// The current storage version. @@ -161,13 +163,17 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// The Scheduler. 
- type Scheduler: ScheduleAnon, PalletsOriginOf> - + ScheduleNamed, PalletsOriginOf>; + type Scheduler: ScheduleAnon, CallOf, PalletsOriginOf> + + ScheduleNamed, CallOf, PalletsOriginOf>; /// Currency type for this pallet. type Currency: ReservableCurrency; // Origins and unbalances. /// Origin from which proposals may be submitted. - type SubmitOrigin: EnsureOrigin; + type SubmitOrigin: EnsureOriginWithArg< + Self::RuntimeOrigin, + PalletsOriginOf, + Success = Self::AccountId, + >; /// Origin from which any vote may be cancelled. type CancelOrigin: EnsureOrigin; /// Origin from which any vote may be killed. @@ -197,25 +203,25 @@ pub mod pallet { /// The number of blocks after submission that a referendum must begin being decided by. /// Once this passes, then anyone may cancel the referendum. #[pallet::constant] - type UndecidingTimeout: Get; + type UndecidingTimeout: Get>; /// Quantization level for the referendum wakeup scheduler. A higher number will result in /// fewer storage reads/writes needed for smaller voters, but also result in delays to the /// automatic referendum status changes. Explicit servicing instructions are unaffected. #[pallet::constant] - type AlarmInterval: Get; + type AlarmInterval: Get>; // The other stuff. /// Information concerning the different referendum tracks. #[pallet::constant] type Tracks: Get< Vec<( - , Self::BlockNumber>>::Id, - TrackInfo, Self::BlockNumber>, + , BlockNumberFor>>::Id, + TrackInfo, BlockNumberFor>, )>, > + TracksInfo< BalanceOf, - Self::BlockNumber, + BlockNumberFor, RuntimeOrigin = ::PalletsOrigin, >; @@ -411,6 +417,15 @@ pub mod pallet { PreimageNotExist, } + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + #[cfg(feature = "try-runtime")] + fn try_state(_n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state()?; + Ok(()) + } + } + #[pallet::call] impl, I: 'static> Pallet { /// Propose a referendum on a privileged action. 
@@ -428,9 +443,10 @@ pub mod pallet { origin: OriginFor, proposal_origin: Box>, proposal: BoundedCallOf, - enactment_moment: DispatchTime, + enactment_moment: DispatchTime>, ) -> DispatchResult { - let who = T::SubmitOrigin::ensure_origin(origin)?; + let proposal_origin = *proposal_origin; + let who = T::SubmitOrigin::ensure_origin(origin, &proposal_origin)?; let track = T::Tracks::track_for(&proposal_origin).map_err(|_| Error::::NoTrack)?; @@ -445,7 +461,7 @@ pub mod pallet { T::Preimages::bound(CallOf::::from(Call::nudge_referendum { index }))?; let status = ReferendumStatus { track, - origin: *proposal_origin, + origin: proposal_origin, proposal: proposal.clone(), enactment: enactment_moment, submitted: now, @@ -699,7 +715,7 @@ pub mod pallet { impl, I: 'static> Polling for Pallet { type Index = ReferendumIndex; type Votes = VotesOf; - type Moment = T::BlockNumber; + type Moment = BlockNumberFor; type Class = TrackIdOf; fn classes() -> Vec { @@ -708,7 +724,7 @@ impl, I: 'static> Polling for Pallet { fn access_poll( index: Self::Index, - f: impl FnOnce(PollStatus<&mut T::Tally, T::BlockNumber, TrackIdOf>) -> R, + f: impl FnOnce(PollStatus<&mut T::Tally, BlockNumberFor, TrackIdOf>) -> R, ) -> R { match ReferendumInfoFor::::get(index) { Some(ReferendumInfo::Ongoing(mut status)) => { @@ -727,7 +743,7 @@ impl, I: 'static> Polling for Pallet { fn try_access_poll( index: Self::Index, f: impl FnOnce( - PollStatus<&mut T::Tally, T::BlockNumber, TrackIdOf>, + PollStatus<&mut T::Tally, BlockNumberFor, TrackIdOf>, ) -> Result, ) -> Result { match ReferendumInfoFor::::get(index) { @@ -844,7 +860,7 @@ impl, I: 'static> Pallet { fn schedule_enactment( index: ReferendumIndex, track: &TrackInfoOf, - desired: DispatchTime, + desired: DispatchTime>, origin: PalletsOriginOf, call: BoundedCallOf, ) { @@ -866,8 +882,8 @@ impl, I: 'static> Pallet { /// Set an alarm to dispatch `call` at block number `when`. 
fn set_alarm( call: BoundedCallOf, - when: T::BlockNumber, - ) -> Option<(T::BlockNumber, ScheduleAddressOf)> { + when: BlockNumberFor, + ) -> Option<(BlockNumberFor, ScheduleAddressOf)> { let alarm_interval = T::AlarmInterval::get().max(One::one()); // Alarm must go off no earlier than `when`. // This rounds `when` upwards to the next multiple of `alarm_interval`. @@ -900,9 +916,9 @@ impl, I: 'static> Pallet { fn begin_deciding( status: &mut ReferendumStatusOf, index: ReferendumIndex, - now: T::BlockNumber, + now: BlockNumberFor, track: &TrackInfoOf, - ) -> (Option, BeginDecidingBranch) { + ) -> (Option>, BeginDecidingBranch) { let is_passing = Self::is_passing( &status.tally, Zero::zero(), @@ -938,11 +954,11 @@ impl, I: 'static> Pallet { /// /// If `None`, then it is queued and should be nudged automatically as the queue gets drained. fn ready_for_deciding( - now: T::BlockNumber, + now: BlockNumberFor, track: &TrackInfoOf, index: ReferendumIndex, status: &mut ReferendumStatusOf, - ) -> (Option, ServiceBranch) { + ) -> (Option>, ServiceBranch) { let deciding_count = DecidingCount::::get(status.track); if deciding_count < track.max_deciding { // Begin deciding. @@ -999,7 +1015,7 @@ impl, I: 'static> Pallet { fn ensure_alarm_at( status: &mut ReferendumStatusOf, index: ReferendumIndex, - alarm: T::BlockNumber, + alarm: BlockNumberFor, ) -> bool { if status.alarm.as_ref().map_or(true, |&(when, _)| when != alarm) { // Either no alarm or one that was different @@ -1026,7 +1042,7 @@ impl, I: 'static> Pallet { /// - If it's ready to be decided, start deciding; /// - If it's not ready to be decided and non-deciding timeout has passed, fail; /// - If it's ongoing and passing, ensure confirming; if at end of confirmation period, pass. - /// - If it's ongoing and not passing, stop confirning; if it has reached end time, fail. + /// - If it's ongoing and not passing, stop confirming; if it has reached end time, fail. 
/// /// Weight will be a bit different depending on what it does, but it's designed so as not to /// differ dramatically, especially if `MaxQueue` is kept small. In particular _there are no @@ -1044,7 +1060,7 @@ impl, I: 'static> Pallet { /// `TrackQueue`. Basically this happens when a referendum is in the deciding queue and receives /// a vote, or when it moves into the deciding queue. fn service_referendum( - now: T::BlockNumber, + now: BlockNumberFor, index: ReferendumIndex, mut status: ReferendumStatusOf, ) -> (ReferendumInfoOf, bool, ServiceBranch) { @@ -1056,7 +1072,7 @@ impl, I: 'static> Pallet { }; // Default the alarm to the end of the world. let timeout = status.submitted + T::UndecidingTimeout::get(); - let mut alarm = T::BlockNumber::max_value(); + let mut alarm = BlockNumberFor::::max_value(); let branch; match &mut status.deciding { None => { @@ -1187,7 +1203,7 @@ impl, I: 'static> Pallet { }, } - let dirty_alarm = if alarm < T::BlockNumber::max_value() { + let dirty_alarm = if alarm < BlockNumberFor::::max_value() { Self::ensure_alarm_at(&mut status, index, alarm) } else { Self::ensure_no_alarm(&mut status) @@ -1202,7 +1218,7 @@ impl, I: 'static> Pallet { tally: &T::Tally, track_id: TrackIdOf, track: &TrackInfoOf, - ) -> T::BlockNumber { + ) -> BlockNumberFor { deciding.confirming.unwrap_or_else(|| { // Set alarm to the point where the current voting would make it pass. let approval = tally.approval(track_id); @@ -1261,8 +1277,8 @@ impl, I: 'static> Pallet { /// `approval_needed`. fn is_passing( tally: &T::Tally, - elapsed: T::BlockNumber, - period: T::BlockNumber, + elapsed: BlockNumberFor, + period: BlockNumberFor, support_needed: &Curve, approval_needed: &Curve, id: TrackIdOf, @@ -1278,4 +1294,87 @@ impl, I: 'static> Pallet { Self::deposit_event(Event::::MetadataCleared { index, hash }); } } + + /// Ensure the correctness of the state of this pallet. + /// + /// The following assertions must always apply. 
+ /// + /// General assertions: + /// + /// * [`ReferendumCount`] must always be equal to the number of referenda in + /// [`ReferendumInfoFor`]. + /// * Referendum indices in [`MetadataOf`] must also be stored in [`ReferendumInfoFor`]. + #[cfg(any(feature = "try-runtime", test))] + fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + ensure!( + ReferendumCount::::get() as usize == + ReferendumInfoFor::::iter_keys().count(), + "Number of referenda in `ReferendumInfoFor` is different than `ReferendumCount`" + ); + + MetadataOf::::iter_keys().try_for_each(|referendum_index| -> DispatchResult { + ensure!( + ReferendumInfoFor::::contains_key(referendum_index), + "Referendum indices in `MetadataOf` must also be stored in `ReferendumInfoOf`" + ); + Ok(()) + })?; + + Self::try_state_referenda_info()?; + Self::try_state_tracks()?; + + Ok(()) + } + + /// Looking at referenda info: + /// + /// - Data regarding ongoing phase: + /// + /// * There must exist track info for the track of the referendum. + /// * The deciding stage has to begin before confirmation period. + /// * If alarm is set the nudge call has to be at most [`UndecidingTimeout`] blocks away + /// from the submission block. + #[cfg(any(feature = "try-runtime", test))] + fn try_state_referenda_info() -> Result<(), sp_runtime::TryRuntimeError> { + ReferendumInfoFor::::iter().try_for_each(|(_, referendum)| { + match referendum { + ReferendumInfo::Ongoing(status) => { + ensure!( + Self::track(status.track).is_some(), + "No track info for the track of the referendum." + ); + + if let Some(deciding) = status.deciding { + ensure!( + deciding.since < + deciding.confirming.unwrap_or(BlockNumberFor::::max_value()), + "Deciding status cannot begin before confirming stage." + ) + } + }, + _ => {}, + } + Ok(()) + }) + } + + /// Looking at tracks: + /// + /// * The referendum indices stored in [`TrackQueue`] must exist as keys in the + /// [`ReferendumInfoFor`] storage map. 
+ #[cfg(any(feature = "try-runtime", test))] + fn try_state_tracks() -> Result<(), sp_runtime::TryRuntimeError> { + T::Tracks::tracks().iter().try_for_each(|track| { + TrackQueue::::get(track.0).iter().try_for_each( + |(referendum_index, _)| -> Result<(), sp_runtime::TryRuntimeError> { + ensure!( + ReferendumInfoFor::::contains_key(referendum_index), + "`ReferendumIndex` inside the `TrackQueue` should be a key in `ReferendumInfoFor`" + ); + Ok(()) + }, + )?; + Ok(()) + }) + } } diff --git a/frame/referenda/src/migration.rs b/frame/referenda/src/migration.rs index c27ab452ac637..281da83d6569e 100644 --- a/frame/referenda/src/migration.rs +++ b/frame/referenda/src/migration.rs @@ -22,6 +22,9 @@ use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use frame_support::{pallet_prelude::*, storage_alias, traits::OnRuntimeUpgrade}; use log; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + /// Initial version of storage types. pub mod v0 { use super::*; @@ -34,7 +37,7 @@ pub mod v0 { pub type ReferendumInfoOf = ReferendumInfo< TrackIdOf, PalletsOriginOf, - ::BlockNumber, + frame_system::pallet_prelude::BlockNumberFor, BoundedCallOf, BalanceOf, TallyOf, @@ -95,9 +98,9 @@ pub mod v1 { pub struct MigrateV0ToV1(PhantomData<(T, I)>); impl, I: 'static> OnRuntimeUpgrade for MigrateV0ToV1 { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { let onchain_version = Pallet::::on_chain_storage_version(); - assert_eq!(onchain_version, 0, "migration from version 0 to 1."); + ensure!(onchain_version == 0, "migration from version 0 to 1."); let referendum_count = v0::ReferendumInfoFor::::iter().count(); log::info!( target: TARGET, @@ -147,16 +150,13 @@ pub mod v1 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), &'static str> { + fn post_upgrade(state: Vec) -> Result<(), TryRuntimeError> { let onchain_version = Pallet::::on_chain_storage_version(); - 
assert_eq!(onchain_version, 1, "must upgrade from version 0 to 1."); + ensure!(onchain_version == 1, "must upgrade from version 0 to 1."); let pre_referendum_count: u32 = Decode::decode(&mut &state[..]) .expect("failed to decode the state from pre-upgrade."); let post_referendum_count = ReferendumInfoFor::::iter().count() as u32; - assert_eq!( - post_referendum_count, pre_referendum_count, - "must migrate all referendums." - ); + ensure!(post_referendum_count == pre_referendum_count, "must migrate all referendums."); log::info!(target: TARGET, "migrated all referendums."); Ok(()) } @@ -199,10 +199,11 @@ pub mod test { #[test] fn migration_v0_to_v1_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { // create and insert into the storage an ongoing referendum v0. let status_v0 = create_status_v0(); let ongoing_v0 = v0::ReferendumInfoOf::::Ongoing(status_v0.clone()); + ReferendumCount::::mutate(|x| x.saturating_inc()); v0::ReferendumInfoFor::::insert(2, ongoing_v0); // create and insert into the storage an approved referendum v0. let approved_v0 = v0::ReferendumInfoOf::::Approved( @@ -210,6 +211,7 @@ pub mod test { Deposit { who: 1, amount: 10 }, Some(Deposit { who: 2, amount: 20 }), ); + ReferendumCount::::mutate(|x| x.saturating_inc()); v0::ReferendumInfoFor::::insert(5, approved_v0); // run migration from v0 to v1. 
v1::MigrateV0ToV1::::on_runtime_upgrade(); diff --git a/frame/referenda/src/mock.rs b/frame/referenda/src/mock.rs index cdedb79556f35..e44167ed561c5 100644 --- a/frame/referenda/src/mock.rs +++ b/frame/referenda/src/mock.rs @@ -31,19 +31,14 @@ use frame_support::{ use frame_system::{EnsureRoot, EnsureSignedBy}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, Hash, IdentityLookup}, - DispatchResult, Perbill, + BuildStorage, DispatchResult, Perbill, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, Balances: pallet_balances, @@ -70,14 +65,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -122,7 +116,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } parameter_types! 
{ @@ -231,23 +225,32 @@ impl Config for Test { type Tracks = TestTracksInfo; type Preimages = Preimage; } +pub struct ExtBuilder {} -pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - let balances = vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100), (6, 100)]; - pallet_balances::GenesisConfig:: { balances } - .assimilate_storage(&mut t) - .unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext +impl Default for ExtBuilder { + fn default() -> Self { + Self {} + } } -/// Execute the function two times, with `true` and with `false`. -#[allow(dead_code)] -pub fn new_test_ext_execute_with_cond(execute: impl FnOnce(bool) -> () + Clone) { - new_test_ext().execute_with(|| (execute.clone())(false)); - new_test_ext().execute_with(|| execute(true)); +impl ExtBuilder { + pub fn build(self) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let balances = vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100), (6, 100)]; + pallet_balances::GenesisConfig:: { balances } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(|| { + test(); + Referenda::do_try_state().unwrap(); + }) + } } #[derive(Encode, Debug, Decode, TypeInfo, Eq, PartialEq, Clone, MaxEncodedLen)] diff --git a/frame/referenda/src/tests.rs b/frame/referenda/src/tests.rs index 39f1945bf4f3b..c7469946c2dab 100644 --- a/frame/referenda/src/tests.rs +++ b/frame/referenda/src/tests.rs @@ -30,7 +30,7 @@ use pallet_balances::Error as BalancesError; #[test] fn params_should_work() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { assert_eq!(ReferendumCount::::get(), 0); 
assert_eq!(Balances::free_balance(42), 0); assert_eq!(Balances::total_issuance(), 600); @@ -39,7 +39,7 @@ fn params_should_work() { #[test] fn basic_happy_path_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { // #1: submit assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), @@ -75,7 +75,7 @@ fn basic_happy_path_works() { #[test] fn insta_confirm_then_kill_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let r = Confirming { immediate: true }.create(); run_to(6); assert_ok!(Referenda::kill(RuntimeOrigin::root(), r)); @@ -85,7 +85,7 @@ fn insta_confirm_then_kill_works() { #[test] fn confirm_then_reconfirm_with_elapsed_trigger_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let r = Confirming { immediate: false }.create(); assert_eq!(confirming_until(r), 8); run_to(7); @@ -99,7 +99,7 @@ fn confirm_then_reconfirm_with_elapsed_trigger_works() { #[test] fn instaconfirm_then_reconfirm_with_elapsed_trigger_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let r = Confirming { immediate: true }.create(); run_to(6); assert_eq!(confirming_until(r), 7); @@ -113,7 +113,7 @@ fn instaconfirm_then_reconfirm_with_elapsed_trigger_works() { #[test] fn instaconfirm_then_reconfirm_with_voting_trigger_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let r = Confirming { immediate: true }.create(); run_to(6); assert_eq!(confirming_until(r), 7); @@ -131,7 +131,7 @@ fn instaconfirm_then_reconfirm_with_voting_trigger_works() { #[test] fn voting_should_extend_for_late_confirmation() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let r = Passing.create(); run_to(10); assert_eq!(confirming_until(r), 11); @@ -142,7 +142,7 @@ fn voting_should_extend_for_late_confirmation() { #[test] fn should_instafail_during_extension_confirmation() { - 
new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let r = Passing.create(); run_to(10); assert_eq!(confirming_until(r), 11); @@ -155,7 +155,7 @@ fn should_instafail_during_extension_confirmation() { #[test] fn confirming_then_fail_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let r = Failing.create(); // Normally ends at 5 + 4 (voting period) = 9. assert_eq!(deciding_and_failing_since(r), 5); @@ -170,7 +170,7 @@ fn confirming_then_fail_works() { #[test] fn queueing_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { // Submit a proposal into a track with a queue len of 1. assert_ok!(Referenda::submit( RuntimeOrigin::signed(5), @@ -269,7 +269,7 @@ fn queueing_works() { #[test] fn alarm_interval_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let call = ::Preimages::bound(CallOf::::from(Call::nudge_referendum { index: 0, @@ -290,7 +290,7 @@ fn alarm_interval_works() { #[test] fn decision_time_is_correct() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let decision_time = |since: u64| { Pallet::::decision_time( &DecidingStatus { since: since.into(), confirming: None }, @@ -308,7 +308,7 @@ fn decision_time_is_correct() { #[test] fn auto_timeout_should_happen_with_nothing_but_submit() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { // #1: submit assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), @@ -329,7 +329,7 @@ fn auto_timeout_should_happen_with_nothing_but_submit() { #[test] fn tracks_are_distinguished() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), @@ -390,7 +390,7 @@ fn tracks_are_distinguished() { #[test] fn submit_errors_work() { - new_test_ext().execute_with(|| { + 
ExtBuilder::default().build_and_execute(|| { let h = set_balance_proposal_bounded(1); // No track for Signed origins. assert_noop!( @@ -418,7 +418,7 @@ fn submit_errors_work() { #[test] fn decision_deposit_errors_work() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let e = Error::::NotOngoing; assert_noop!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0), e); @@ -440,7 +440,7 @@ fn decision_deposit_errors_work() { #[test] fn refund_deposit_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let e = Error::::BadReferendum; assert_noop!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(1), 0), e); @@ -465,7 +465,7 @@ fn refund_deposit_works() { #[test] fn refund_submission_deposit_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { // refund of non existing referendum fails. let e = Error::::BadReferendum; assert_noop!(Referenda::refund_submission_deposit(RuntimeOrigin::signed(1), 0), e); @@ -503,7 +503,7 @@ fn refund_submission_deposit_works() { #[test] fn cancel_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), @@ -522,7 +522,7 @@ fn cancel_works() { #[test] fn cancel_errors_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), @@ -540,7 +540,7 @@ fn cancel_errors_works() { #[test] fn kill_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), @@ -560,7 +560,7 @@ fn kill_works() { #[test] fn kill_errors_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let h = 
set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), @@ -601,7 +601,7 @@ fn curve_handles_all_inputs() { #[test] fn set_metadata_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { use frame_support::traits::Hash as PreimageHash; // invalid preimage hash. let invalid_hash: PreimageHash = [1u8; 32].into(); @@ -649,7 +649,7 @@ fn set_metadata_works() { #[test] fn clear_metadata_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let hash = note_preimage(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), diff --git a/frame/referenda/src/types.rs b/frame/referenda/src/types.rs index d61b8955443c2..ba89383888a7d 100644 --- a/frame/referenda/src/types.rs +++ b/frame/referenda/src/types.rs @@ -42,7 +42,7 @@ pub type PalletsOriginOf = pub type ReferendumInfoOf = ReferendumInfo< TrackIdOf, PalletsOriginOf, - ::BlockNumber, + BlockNumberFor, BoundedCallOf, BalanceOf, TallyOf, @@ -52,22 +52,19 @@ pub type ReferendumInfoOf = ReferendumInfo< pub type ReferendumStatusOf = ReferendumStatus< TrackIdOf, PalletsOriginOf, - ::BlockNumber, + BlockNumberFor, BoundedCallOf, BalanceOf, TallyOf, ::AccountId, ScheduleAddressOf, >; -pub type DecidingStatusOf = DecidingStatus<::BlockNumber>; -pub type TrackInfoOf = - TrackInfo, ::BlockNumber>; -pub type TrackIdOf = <>::Tracks as TracksInfo< - BalanceOf, - ::BlockNumber, ->>::Id; +pub type DecidingStatusOf = DecidingStatus>; +pub type TrackInfoOf = TrackInfo, BlockNumberFor>; +pub type TrackIdOf = + <>::Tracks as TracksInfo, BlockNumberFor>>::Id; pub type ScheduleAddressOf = <>::Scheduler as Anon< - ::BlockNumber, + BlockNumberFor, CallOf, PalletsOriginOf, >>::Address; diff --git a/frame/referenda/src/weights.rs b/frame/referenda/src/weights.rs index 464e60dc581e6..4b89379b311da 100644 --- a/frame/referenda/src/weights.rs +++ b/frame/referenda/src/weights.rs @@ -18,9 +18,9 @@ //! 
Autogenerated weights for pallet_referenda //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_referenda +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_referenda. pub trait WeightInfo { @@ -93,8 +97,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `220` // Estimated: `110487` - // Minimum execution time: 42_285_000 picoseconds. - Weight::from_parts(42_646_000, 110487) + // Minimum execution time: 40_175_000 picoseconds. + Weight::from_parts(41_107_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -106,8 +110,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `473` // Estimated: `219984` - // Minimum execution time: 53_455_000 picoseconds. - Weight::from_parts(54_034_000, 219984) + // Minimum execution time: 50_922_000 picoseconds. 
+ Weight::from_parts(52_179_000, 219984) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -117,14 +121,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3107` - // Estimated: `5477` - // Minimum execution time: 50_138_000 picoseconds. - Weight::from_parts(50_449_000, 5477) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Measured: `3260` + // Estimated: `110487` + // Minimum execution time: 69_559_000 picoseconds. + Weight::from_parts(72_143_000, 110487) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) @@ -132,14 +138,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3127` - // Estimated: `5477` - // Minimum execution time: 49_707_000 picoseconds. 
- Weight::from_parts(50_246_000, 5477) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Measured: `3280` + // Estimated: `110487` + // Minimum execution time: 68_833_000 picoseconds. + Weight::from_parts(70_987_000, 110487) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) @@ -151,8 +159,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `473` // Estimated: `219984` - // Minimum execution time: 62_880_000 picoseconds. - Weight::from_parts(63_579_000, 219984) + // Minimum execution time: 61_794_000 picoseconds. + Weight::from_parts(62_846_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -166,8 +174,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `473` // Estimated: `219984` - // Minimum execution time: 60_827_000 picoseconds. - Weight::from_parts(61_392_000, 219984) + // Minimum execution time: 58_664_000 picoseconds. + Weight::from_parts(60_195_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -177,8 +185,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3831` - // Minimum execution time: 31_991_000 picoseconds. - Weight::from_parts(32_474_000, 3831) + // Minimum execution time: 30_850_000 picoseconds. + Weight::from_parts(32_130_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -188,8 +196,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3831` - // Minimum execution time: 32_162_000 picoseconds. 
- Weight::from_parts(32_776_000, 3831) + // Minimum execution time: 30_747_000 picoseconds. + Weight::from_parts(32_196_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -201,8 +209,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `381` // Estimated: `219984` - // Minimum execution time: 37_493_000 picoseconds. - Weight::from_parts(37_979_000, 219984) + // Minimum execution time: 36_139_000 picoseconds. + Weight::from_parts(37_252_000, 219984) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -216,8 +224,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `622` // Estimated: `219984` - // Minimum execution time: 80_095_000 picoseconds. - Weight::from_parts(80_831_000, 219984) + // Minimum execution time: 80_862_000 picoseconds. + Weight::from_parts(83_045_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -229,8 +237,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `5477` - // Minimum execution time: 10_906_000 picoseconds. - Weight::from_parts(11_055_000, 5477) + // Minimum execution time: 10_136_000 picoseconds. 
+ Weight::from_parts(10_638_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -238,61 +246,57 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) + /// Storage: Scheduler Agenda (r:1 w:1) /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `4567` - // Estimated: `219984` - // Minimum execution time: 90_747_000 picoseconds. - Weight::from_parts(91_407_000, 219984) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `3150` + // Estimated: `110487` + // Minimum execution time: 52_022_000 picoseconds. + Weight::from_parts(53_910_000, 110487) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) + /// Storage: Scheduler Agenda (r:1 w:1) /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `4567` - // Estimated: `219984` - // Minimum execution time: 93_615_000 picoseconds. 
- Weight::from_parts(94_245_000, 219984) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `3150` + // Estimated: `110487` + // Minimum execution time: 53_683_000 picoseconds. + Weight::from_parts(55_707_000, 110487) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `4588` - // Estimated: `110487` - // Minimum execution time: 60_945_000 picoseconds. - Weight::from_parts(61_246_000, 110487) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Measured: `3011` + // Estimated: `5477` + // Minimum execution time: 24_043_000 picoseconds. 
+ Weight::from_parts(24_512_000, 5477) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `4574` - // Estimated: `110487` - // Minimum execution time: 60_105_000 picoseconds. - Weight::from_parts(60_544_000, 110487) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Measured: `3011` + // Estimated: `5477` + // Minimum execution time: 23_588_000 picoseconds. 
+ Weight::from_parts(24_422_000, 5477) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) @@ -300,16 +304,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `4548` - // Estimated: `110487` - // Minimum execution time: 62_251_000 picoseconds. - Weight::from_parts(62_952_000, 110487) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Measured: `3015` + // Estimated: `5477` + // Minimum execution time: 31_443_000 picoseconds. 
+ Weight::from_parts(32_725_000, 5477) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) @@ -317,16 +319,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `4582` - // Estimated: `110487` - // Minimum execution time: 61_527_000 picoseconds. - Weight::from_parts(62_082_000, 110487) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Measured: `3035` + // Estimated: `5477` + // Minimum execution time: 30_319_000 picoseconds. + Weight::from_parts(31_652_000, 5477) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) @@ -336,8 +336,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `333` // Estimated: `110487` - // Minimum execution time: 24_897_000 picoseconds. - Weight::from_parts(25_213_000, 110487) + // Minimum execution time: 23_062_000 picoseconds. 
+ Weight::from_parts(23_614_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -349,8 +349,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `381` // Estimated: `110487` - // Minimum execution time: 25_077_000 picoseconds. - Weight::from_parts(25_385_000, 110487) + // Minimum execution time: 23_537_000 picoseconds. + Weight::from_parts(24_267_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -360,8 +360,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `278` // Estimated: `3831` - // Minimum execution time: 17_930_000 picoseconds. - Weight::from_parts(18_112_000, 3831) + // Minimum execution time: 16_388_000 picoseconds. + Weight::from_parts(16_676_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -375,8 +375,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `381` // Estimated: `110487` - // Minimum execution time: 34_405_000 picoseconds. - Weight::from_parts(34_698_000, 110487) + // Minimum execution time: 32_801_000 picoseconds. + Weight::from_parts(34_053_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -390,8 +390,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `381` // Estimated: `110487` - // Minimum execution time: 37_313_000 picoseconds. - Weight::from_parts(37_807_000, 110487) + // Minimum execution time: 35_704_000 picoseconds. 
+ Weight::from_parts(36_451_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -403,8 +403,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `434` // Estimated: `110487` - // Minimum execution time: 30_552_000 picoseconds. - Weight::from_parts(30_817_000, 110487) + // Minimum execution time: 29_151_000 picoseconds. + Weight::from_parts(30_055_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -416,8 +416,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `417` // Estimated: `110487` - // Minimum execution time: 31_100_000 picoseconds. - Weight::from_parts(31_696_000, 110487) + // Minimum execution time: 29_265_000 picoseconds. + Weight::from_parts(30_213_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -429,8 +429,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `434` // Estimated: `110487` - // Minimum execution time: 28_777_000 picoseconds. - Weight::from_parts(29_188_000, 110487) + // Minimum execution time: 27_760_000 picoseconds. + Weight::from_parts(28_381_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -442,8 +442,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `438` // Estimated: `110487` - // Minimum execution time: 26_986_000 picoseconds. - Weight::from_parts(27_283_000, 110487) + // Minimum execution time: 25_464_000 picoseconds. 
+ Weight::from_parts(26_348_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -457,8 +457,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `438` // Estimated: `219984` - // Minimum execution time: 43_538_000 picoseconds. - Weight::from_parts(44_671_000, 219984) + // Minimum execution time: 42_629_000 picoseconds. + Weight::from_parts(43_732_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -470,8 +470,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `434` // Estimated: `110487` - // Minimum execution time: 30_559_000 picoseconds. - Weight::from_parts(31_294_000, 110487) + // Minimum execution time: 30_015_000 picoseconds. + Weight::from_parts(30_827_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -485,8 +485,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `422` // Estimated: `3831` - // Minimum execution time: 21_196_000 picoseconds. - Weight::from_parts(21_593_000, 3831) + // Minimum execution time: 19_901_000 picoseconds. + Weight::from_parts(20_681_000, 3831) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -498,8 +498,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3831` - // Minimum execution time: 18_827_000 picoseconds. - Weight::from_parts(19_171_000, 3831) + // Minimum execution time: 17_323_000 picoseconds. + Weight::from_parts(18_227_000, 3831) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -517,8 +517,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `220` // Estimated: `110487` - // Minimum execution time: 42_285_000 picoseconds. 
- Weight::from_parts(42_646_000, 110487) + // Minimum execution time: 40_175_000 picoseconds. + Weight::from_parts(41_107_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -530,8 +530,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `473` // Estimated: `219984` - // Minimum execution time: 53_455_000 picoseconds. - Weight::from_parts(54_034_000, 219984) + // Minimum execution time: 50_922_000 picoseconds. + Weight::from_parts(52_179_000, 219984) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -541,14 +541,16 @@ impl WeightInfo for () { /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3107` - // Estimated: `5477` - // Minimum execution time: 50_138_000 picoseconds. - Weight::from_parts(50_449_000, 5477) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Measured: `3260` + // Estimated: `110487` + // Minimum execution time: 69_559_000 picoseconds. 
+ Weight::from_parts(72_143_000, 110487) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) @@ -556,14 +558,16 @@ impl WeightInfo for () { /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3127` - // Estimated: `5477` - // Minimum execution time: 49_707_000 picoseconds. - Weight::from_parts(50_246_000, 5477) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Measured: `3280` + // Estimated: `110487` + // Minimum execution time: 68_833_000 picoseconds. + Weight::from_parts(70_987_000, 110487) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) @@ -575,8 +579,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `473` // Estimated: `219984` - // Minimum execution time: 62_880_000 picoseconds. - Weight::from_parts(63_579_000, 219984) + // Minimum execution time: 61_794_000 picoseconds. 
+ Weight::from_parts(62_846_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -590,8 +594,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `473` // Estimated: `219984` - // Minimum execution time: 60_827_000 picoseconds. - Weight::from_parts(61_392_000, 219984) + // Minimum execution time: 58_664_000 picoseconds. + Weight::from_parts(60_195_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -601,8 +605,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3831` - // Minimum execution time: 31_991_000 picoseconds. - Weight::from_parts(32_474_000, 3831) + // Minimum execution time: 30_850_000 picoseconds. + Weight::from_parts(32_130_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -612,8 +616,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3831` - // Minimum execution time: 32_162_000 picoseconds. - Weight::from_parts(32_776_000, 3831) + // Minimum execution time: 30_747_000 picoseconds. + Weight::from_parts(32_196_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -625,8 +629,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `381` // Estimated: `219984` - // Minimum execution time: 37_493_000 picoseconds. - Weight::from_parts(37_979_000, 219984) + // Minimum execution time: 36_139_000 picoseconds. + Weight::from_parts(37_252_000, 219984) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -640,8 +644,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `622` // Estimated: `219984` - // Minimum execution time: 80_095_000 picoseconds. 
- Weight::from_parts(80_831_000, 219984) + // Minimum execution time: 80_862_000 picoseconds. + Weight::from_parts(83_045_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -653,8 +657,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `174` // Estimated: `5477` - // Minimum execution time: 10_906_000 picoseconds. - Weight::from_parts(11_055_000, 5477) + // Minimum execution time: 10_136_000 picoseconds. + Weight::from_parts(10_638_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -662,61 +666,57 @@ impl WeightInfo for () { /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) + /// Storage: Scheduler Agenda (r:1 w:1) /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `4567` - // Estimated: `219984` - // Minimum execution time: 90_747_000 picoseconds. - Weight::from_parts(91_407_000, 219984) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `3150` + // Estimated: `110487` + // Minimum execution time: 52_022_000 picoseconds. 
+ Weight::from_parts(53_910_000, 110487) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) + /// Storage: Scheduler Agenda (r:1 w:1) /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `4567` - // Estimated: `219984` - // Minimum execution time: 93_615_000 picoseconds. - Weight::from_parts(94_245_000, 219984) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `3150` + // Estimated: `110487` + // Minimum execution time: 53_683_000 picoseconds. + Weight::from_parts(55_707_000, 110487) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `4588` - // Estimated: `110487` - // Minimum execution time: 60_945_000 picoseconds. 
- Weight::from_parts(61_246_000, 110487) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Measured: `3011` + // Estimated: `5477` + // Minimum execution time: 24_043_000 picoseconds. + Weight::from_parts(24_512_000, 5477) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `4574` - // Estimated: `110487` - // Minimum execution time: 60_105_000 picoseconds. - Weight::from_parts(60_544_000, 110487) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Measured: `3011` + // Estimated: `5477` + // Minimum execution time: 23_588_000 picoseconds. 
+ Weight::from_parts(24_422_000, 5477) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) @@ -724,16 +724,14 @@ impl WeightInfo for () { /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `4548` - // Estimated: `110487` - // Minimum execution time: 62_251_000 picoseconds. - Weight::from_parts(62_952_000, 110487) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Measured: `3015` + // Estimated: `5477` + // Minimum execution time: 31_443_000 picoseconds. 
+ Weight::from_parts(32_725_000, 5477) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) @@ -741,16 +739,14 @@ impl WeightInfo for () { /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) /// Storage: Referenda TrackQueue (r:1 w:1) /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `4582` - // Estimated: `110487` - // Minimum execution time: 61_527_000 picoseconds. - Weight::from_parts(62_082_000, 110487) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Measured: `3035` + // Estimated: `5477` + // Minimum execution time: 30_319_000 picoseconds. + Weight::from_parts(31_652_000, 5477) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: Referenda ReferendumInfoFor (r:1 w:1) /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) @@ -760,8 +756,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `333` // Estimated: `110487` - // Minimum execution time: 24_897_000 picoseconds. - Weight::from_parts(25_213_000, 110487) + // Minimum execution time: 23_062_000 picoseconds. 
+ Weight::from_parts(23_614_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -773,8 +769,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `381` // Estimated: `110487` - // Minimum execution time: 25_077_000 picoseconds. - Weight::from_parts(25_385_000, 110487) + // Minimum execution time: 23_537_000 picoseconds. + Weight::from_parts(24_267_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -784,8 +780,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `278` // Estimated: `3831` - // Minimum execution time: 17_930_000 picoseconds. - Weight::from_parts(18_112_000, 3831) + // Minimum execution time: 16_388_000 picoseconds. + Weight::from_parts(16_676_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -799,8 +795,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `381` // Estimated: `110487` - // Minimum execution time: 34_405_000 picoseconds. - Weight::from_parts(34_698_000, 110487) + // Minimum execution time: 32_801_000 picoseconds. + Weight::from_parts(34_053_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -814,8 +810,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `381` // Estimated: `110487` - // Minimum execution time: 37_313_000 picoseconds. - Weight::from_parts(37_807_000, 110487) + // Minimum execution time: 35_704_000 picoseconds. + Weight::from_parts(36_451_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -827,8 +823,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `434` // Estimated: `110487` - // Minimum execution time: 30_552_000 picoseconds. 
- Weight::from_parts(30_817_000, 110487) + // Minimum execution time: 29_151_000 picoseconds. + Weight::from_parts(30_055_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -840,8 +836,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `417` // Estimated: `110487` - // Minimum execution time: 31_100_000 picoseconds. - Weight::from_parts(31_696_000, 110487) + // Minimum execution time: 29_265_000 picoseconds. + Weight::from_parts(30_213_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -853,8 +849,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `434` // Estimated: `110487` - // Minimum execution time: 28_777_000 picoseconds. - Weight::from_parts(29_188_000, 110487) + // Minimum execution time: 27_760_000 picoseconds. + Weight::from_parts(28_381_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -866,8 +862,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `438` // Estimated: `110487` - // Minimum execution time: 26_986_000 picoseconds. - Weight::from_parts(27_283_000, 110487) + // Minimum execution time: 25_464_000 picoseconds. + Weight::from_parts(26_348_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -881,8 +877,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `438` // Estimated: `219984` - // Minimum execution time: 43_538_000 picoseconds. - Weight::from_parts(44_671_000, 219984) + // Minimum execution time: 42_629_000 picoseconds. 
+ Weight::from_parts(43_732_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -894,8 +890,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `434` // Estimated: `110487` - // Minimum execution time: 30_559_000 picoseconds. - Weight::from_parts(31_294_000, 110487) + // Minimum execution time: 30_015_000 picoseconds. + Weight::from_parts(30_827_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -909,8 +905,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `422` // Estimated: `3831` - // Minimum execution time: 21_196_000 picoseconds. - Weight::from_parts(21_593_000, 3831) + // Minimum execution time: 19_901_000 picoseconds. + Weight::from_parts(20_681_000, 3831) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -922,8 +918,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3831` - // Minimum execution time: 18_827_000 picoseconds. - Weight::from_parts(19_171_000, 3831) + // Minimum execution time: 17_323_000 picoseconds. 
+ Weight::from_parts(18_227_000, 3831) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/remark/Cargo.toml b/frame/remark/Cargo.toml index 5fc595d710680..df77770c3a846 100644 --- a/frame/remark/Cargo.toml +++ b/frame/remark/Cargo.toml @@ -13,23 +13,28 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", optional = true } +serde = { version = "1.0.163", optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } 
[features] default = ["std"] -runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] std = [ "frame-benchmarking?/std", "codec/std", @@ -42,4 +47,8 @@ std = [ "sp-runtime/std", "sp-std/std", ] -try-runtime = [ "frame-support/try-runtime" ] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/remark/src/mock.rs b/frame/remark/src/mock.rs index 39f5d50ed28fd..e597a1ca4dfe8 100644 --- a/frame/remark/src/mock.rs +++ b/frame/remark/src/mock.rs @@ -21,22 +21,17 @@ use crate as pallet_remark; use frame_support::traits::{ConstU16, ConstU32, ConstU64}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; pub type Block = frame_system::mocking::MockBlock; // Configure a mock runtime to test the pallet. 
frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Remark: pallet_remark::{ Pallet, Call, Event }, } ); @@ -47,13 +42,12 @@ impl frame_system::Config for Test { type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); @@ -74,6 +68,6 @@ impl pallet_remark::Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = GenesisConfig { system: Default::default() }.build_storage().unwrap(); + let t = RuntimeGenesisConfig { system: Default::default() }.build_storage().unwrap(); t.into() } diff --git a/frame/remark/src/weights.rs b/frame/remark/src/weights.rs index 9aa56eb339fcb..46475db163ffd 100644 --- a/frame/remark/src/weights.rs +++ b/frame/remark/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_remark //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_remark +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_remark. pub trait WeightInfo { @@ -59,10 +63,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_301_000 picoseconds. - Weight::from_parts(2_516_065, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_089, 0).saturating_mul(l.into())) + // Minimum execution time: 8_471_000 picoseconds. + Weight::from_parts(8_586_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_359, 0).saturating_mul(l.into())) } } @@ -73,9 +77,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_301_000 picoseconds. - Weight::from_parts(2_516_065, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_089, 0).saturating_mul(l.into())) + // Minimum execution time: 8_471_000 picoseconds. 
+ Weight::from_parts(8_586_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_359, 0).saturating_mul(l.into())) } } diff --git a/frame/root-offences/Cargo.toml b/frame/root-offences/Cargo.toml index 58fa281dc3d76..6b7623b9af0e7 100644 --- a/frame/root-offences/Cargo.toml +++ b/frame/root-offences/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } pallet-session = { version = "4.0.0-dev", features = [ "historical" ], path = "../../frame/session", default-features = false } @@ -21,7 +21,7 @@ pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../. frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } [dev-dependencies] @@ -29,22 +29,48 @@ pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = 
false, path = "../../primitives/io" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } [features] -runtime-benchmarks = [] -try-runtime = ["frame-support/try-runtime"] +runtime-benchmarks = [ + "frame-election-provider-support/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-staking/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-election-provider-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-session/try-runtime", + "pallet-staking/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime" +] default = ["std"] std = [ "codec/std", "frame-support/std", - "frame-system/std", + + "frame-system/std", "pallet-session/std", "pallet-staking/std", "scale-info/std", "sp-runtime/std", + "frame-election-provider-support/std", + "pallet-balances/std", + "pallet-timestamp/std", + "sp-core/std", + "sp-io/std", + "sp-staking/std", + "sp-std/std" ] diff --git a/frame/root-offences/src/mock.rs b/frame/root-offences/src/mock.rs index e48360ed34e24..2d2a5476149f6 100644 --- a/frame/root-offences/src/mock.rs +++ b/frame/root-offences/src/mock.rs @@ -18,22 +18,25 @@ use super::*; use crate as root_offences; -use frame_election_provider_support::{onchain, SequentialPhragmen}; +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, SequentialPhragmen, +}; use frame_support::{ parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild, Hooks, OneSessionHandler}, + traits::{ConstU32, ConstU64, Hooks, OneSessionHandler}, }; use pallet_staking::StakerStatus; use sp_core::H256; use sp_runtime::{ 
curve::PiecewiseLinear, - testing::{Header, UintAuthorityId}, + testing::UintAuthorityId, traits::{BlakeTwo256, IdentityLookup, Zero}, + BuildStorage, }; use sp_staking::{EraIndex, SessionIndex}; use sp_std::collections::btree_map::BTreeMap; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; type AccountId = u64; type Balance = u64; @@ -43,12 +46,9 @@ pub const INIT_TIMESTAMP: u64 = 30_000; pub const BLOCK_TIME: u64 = 1000; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, @@ -90,14 +90,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -123,7 +122,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -138,6 +137,10 @@ pallet_staking_reward_curve::build! { ); } +parameter_types! 
{ + pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); +} + pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { type System = Test; @@ -145,19 +148,7 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = (); type MaxWinners = ConstU32<100>; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; -} - -pub struct OnStakerSlashMock(core::marker::PhantomData); -impl sp_staking::OnStakerSlash for OnStakerSlashMock { - fn on_slash( - _pool_account: &AccountId, - slashed_bonded: Balance, - slashed_chunks: &BTreeMap, - ) { - LedgerSlashPerEra::set((slashed_bonded, slashed_chunks.clone())); - } + type Bounds = ElectionsBounds; } parameter_types! { @@ -172,11 +163,10 @@ parameter_types! { } impl pallet_staking::Config for Test { - type MaxNominations = ConstU32<16>; type Currency = Balances; type CurrencyBalance = ::Balance; type UnixTime = Timestamp; - type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type CurrencyToVote = (); type RewardRemainder = (); type RuntimeEvent = RuntimeEvent; type Slash = (); @@ -193,10 +183,11 @@ impl pallet_staking::Config for Test { type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; type MaxUnlockingChunks = ConstU32<32>; type HistoryDepth = ConstU32<84>; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; - type OnStakerSlash = OnStakerSlashMock; + type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } @@ -255,7 +246,7 @@ impl Default for ExtBuilder { impl ExtBuilder { fn build(self) -> sp_io::TestExternalities { - let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut storage = 
frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![ diff --git a/frame/root-testing/Cargo.toml b/frame/root-testing/Cargo.toml index 6e8c13f775de0..e6f32587d4593 100644 --- a/frame/root-testing/Cargo.toml +++ b/frame/root-testing/Cargo.toml @@ -14,22 +14,31 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [features] -try-runtime = ["frame-support/try-runtime"] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" +] default = ["std"] std = [ "codec/std", "frame-support/std", - "frame-system/std", + + "frame-system/std", "scale-info/std", - "sp-runtime/std", + + "sp-runtime/std", + "sp-core/std", + "sp-io/std", + "sp-std/std" ] diff 
--git a/frame/salary/Cargo.toml b/frame/salary/Cargo.toml index 86cae16bb28b7..8fedecf4cec97 100644 --- a/frame/salary/Cargo.toml +++ b/frame/salary/Cargo.toml @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.16", default-features = false } scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../primitives/arithmetic" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [features] default = ["std"] @@ -46,4 +46,8 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] 
-try-runtime = ["frame-support/try-runtime"] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/salary/src/benchmarking.rs b/frame/salary/src/benchmarking.rs index 339185b37cb7b..7528293506aec 100644 --- a/frame/salary/src/benchmarking.rs +++ b/frame/salary/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. +// Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -107,7 +107,7 @@ mod benchmarks { System::::set_block_number(System::::block_number() + T::RegistrationPeriod::get()); let salary = T::Salary::get_salary(T::Members::rank_of(&caller).unwrap(), &caller); - T::Paymaster::ensure_successful(&caller, salary); + T::Paymaster::ensure_successful(&caller, (), salary); #[extrinsic_call] _(RawOrigin::Signed(caller.clone())); @@ -134,7 +134,7 @@ mod benchmarks { let salary = T::Salary::get_salary(T::Members::rank_of(&caller).unwrap(), &caller); let recipient: T::AccountId = account("recipient", 0, SEED); - T::Paymaster::ensure_successful(&recipient, salary); + T::Paymaster::ensure_successful(&recipient, (), salary); #[extrinsic_call] _(RawOrigin::Signed(caller.clone()), recipient.clone()); @@ -161,7 +161,7 @@ mod benchmarks { let salary = T::Salary::get_salary(T::Members::rank_of(&caller).unwrap(), &caller); let recipient: T::AccountId = account("recipient", 0, SEED); - T::Paymaster::ensure_successful(&recipient, salary); + T::Paymaster::ensure_successful(&recipient, (), salary); Salary::::payout(RawOrigin::Signed(caller.clone()).into()).unwrap(); let id = match Claimant::::get(&caller).unwrap().status { Attempted { id, .. 
} => id, diff --git a/frame/salary/src/lib.rs b/frame/salary/src/lib.rs index 0b2b4b47d8b70..53dd7224909a8 100644 --- a/frame/salary/src/lib.rs +++ b/frame/salary/src/lib.rs @@ -126,14 +126,14 @@ pub mod pallet { /// The number of blocks between sequential payout cycles is the sum of this and /// `PayoutPeriod`. #[pallet::constant] - type RegistrationPeriod: Get; + type RegistrationPeriod: Get>; /// The number of blocks within a cycle which accounts have to claim the payout. /// /// The number of blocks between sequential payout cycles is the sum of this and /// `RegistrationPeriod`. #[pallet::constant] - type PayoutPeriod: Get; + type PayoutPeriod: Get>; /// The total budget per cycle. /// @@ -142,11 +142,10 @@ pub mod pallet { type Budget: Get>; } - pub type CycleIndexOf = ::BlockNumber; + pub type CycleIndexOf = BlockNumberFor; pub type BalanceOf = <>::Paymaster as Pay>::Balance; pub type IdOf = <>::Paymaster as Pay>::Id; - pub type StatusOf = - StatusType, ::BlockNumber, BalanceOf>; + pub type StatusOf = StatusType, BlockNumberFor, BalanceOf>; pub type ClaimantStatusOf = ClaimantStatus, BalanceOf, IdOf>; /// The overall status of the system. 
@@ -389,7 +388,7 @@ pub mod pallet { pub fn last_active(who: &T::AccountId) -> Result, DispatchError> { Ok(Claimant::::get(&who).ok_or(Error::::NotInducted)?.last_active) } - pub fn cycle_period() -> T::BlockNumber { + pub fn cycle_period() -> BlockNumberFor { T::RegistrationPeriod::get() + T::PayoutPeriod::get() } fn do_payout(who: T::AccountId, beneficiary: T::AccountId) -> DispatchResult { @@ -436,8 +435,8 @@ pub mod pallet { claimant.last_active = status.cycle_index; - let id = T::Paymaster::pay(&beneficiary, (), payout) - .map_err(|()| Error::::PayError)?; + let id = + T::Paymaster::pay(&beneficiary, (), payout).map_err(|_| Error::::PayError)?; claimant.status = Attempted { registered, id, amount: payout }; diff --git a/frame/salary/src/tests.rs b/frame/salary/src/tests.rs index 1b7bc6cbb6df5..034dce24b8b38 100644 --- a/frame/salary/src/tests.rs +++ b/frame/salary/src/tests.rs @@ -27,25 +27,20 @@ use frame_support::{ }; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, Identity, IdentityLookup}, - DispatchResult, + BuildStorage, DispatchResult, }; use sp_std::cell::RefCell; use super::*; use crate as pallet_salary; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Salary: pallet_salary::{Pallet, Call, Storage, Event}, } ); @@ -60,14 +55,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block 
= Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -103,12 +97,13 @@ impl Pay for TestPay { type Balance = u64; type Id = u64; type AssetKind = (); + type Error = (); fn pay( who: &Self::Beneficiary, _: Self::AssetKind, amount: Self::Balance, - ) -> Result { + ) -> Result { PAID.with(|paid| *paid.borrow_mut().entry(*who).or_default() += amount); Ok(LAST_ID.with(|lid| { let x = *lid.borrow(); @@ -120,7 +115,7 @@ impl Pay for TestPay { STATUS.with(|s| s.borrow().get(&id).cloned().unwrap_or(PaymentStatus::Unknown)) } #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful(_: &Self::Beneficiary, _: Self::Balance) {} + fn ensure_successful(_: &Self::Beneficiary, _: Self::AssetKind, _: Self::Balance) {} #[cfg(feature = "runtime-benchmarks")] fn ensure_concluded(id: Self::Id) { set_status(id, PaymentStatus::Failure) @@ -186,7 +181,7 @@ impl Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext diff --git a/frame/salary/src/weights.rs b/frame/salary/src/weights.rs index 074bff4da170d..3d3b9e315959b 100644 --- a/frame/salary/src/weights.rs +++ b/frame/salary/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_salary //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_salary +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_salary. pub trait WeightInfo { @@ -66,8 +70,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1541` - // Minimum execution time: 11_785_000 picoseconds. - Weight::from_parts(12_086_000, 1541) + // Minimum execution time: 10_778_000 picoseconds. + Weight::from_parts(11_084_000, 1541) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -77,8 +81,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1541` - // Minimum execution time: 12_721_000 picoseconds. - Weight::from_parts(13_033_000, 1541) + // Minimum execution time: 12_042_000 picoseconds. + Weight::from_parts(12_645_000, 1541) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -90,10 +94,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) fn induct() -> Weight { // Proof Size summary in bytes: - // Measured: `362` + // Measured: `395` // Estimated: `3543` - // Minimum execution time: 19_516_000 picoseconds. - Weight::from_parts(19_938_000, 3543) + // Minimum execution time: 18_374_000 picoseconds. 
+ Weight::from_parts(19_200_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -105,10 +109,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) fn register() -> Weight { // Proof Size summary in bytes: - // Measured: `429` + // Measured: `462` // Estimated: `3543` - // Minimum execution time: 23_145_000 picoseconds. - Weight::from_parts(23_804_000, 3543) + // Minimum execution time: 22_696_000 picoseconds. + Weight::from_parts(23_275_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -120,10 +124,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `429` + // Measured: `462` // Estimated: `3543` - // Minimum execution time: 62_187_000 picoseconds. - Weight::from_parts(63_016_000, 3543) + // Minimum execution time: 63_660_000 picoseconds. + Weight::from_parts(65_006_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -137,10 +141,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn payout_other() -> Weight { // Proof Size summary in bytes: - // Measured: `429` + // Measured: `462` // Estimated: `3593` - // Minimum execution time: 63_828_000 picoseconds. - Weight::from_parts(64_791_000, 3593) + // Minimum execution time: 64_706_000 picoseconds. 
+ Weight::from_parts(65_763_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -152,8 +156,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3543` - // Minimum execution time: 12_911_000 picoseconds. - Weight::from_parts(13_079_000, 3543) + // Minimum execution time: 11_838_000 picoseconds. + Weight::from_parts(12_323_000, 3543) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -167,8 +171,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1541` - // Minimum execution time: 11_785_000 picoseconds. - Weight::from_parts(12_086_000, 1541) + // Minimum execution time: 10_778_000 picoseconds. + Weight::from_parts(11_084_000, 1541) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -178,8 +182,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1541` - // Minimum execution time: 12_721_000 picoseconds. - Weight::from_parts(13_033_000, 1541) + // Minimum execution time: 12_042_000 picoseconds. + Weight::from_parts(12_645_000, 1541) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -191,10 +195,10 @@ impl WeightInfo for () { /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) fn induct() -> Weight { // Proof Size summary in bytes: - // Measured: `362` + // Measured: `395` // Estimated: `3543` - // Minimum execution time: 19_516_000 picoseconds. - Weight::from_parts(19_938_000, 3543) + // Minimum execution time: 18_374_000 picoseconds. 
+ Weight::from_parts(19_200_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -206,10 +210,10 @@ impl WeightInfo for () { /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) fn register() -> Weight { // Proof Size summary in bytes: - // Measured: `429` + // Measured: `462` // Estimated: `3543` - // Minimum execution time: 23_145_000 picoseconds. - Weight::from_parts(23_804_000, 3543) + // Minimum execution time: 22_696_000 picoseconds. + Weight::from_parts(23_275_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -221,10 +225,10 @@ impl WeightInfo for () { /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `429` + // Measured: `462` // Estimated: `3543` - // Minimum execution time: 62_187_000 picoseconds. - Weight::from_parts(63_016_000, 3543) + // Minimum execution time: 63_660_000 picoseconds. + Weight::from_parts(65_006_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -238,10 +242,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn payout_other() -> Weight { // Proof Size summary in bytes: - // Measured: `429` + // Measured: `462` // Estimated: `3593` - // Minimum execution time: 63_828_000 picoseconds. - Weight::from_parts(64_791_000, 3593) + // Minimum execution time: 64_706_000 picoseconds. + Weight::from_parts(65_763_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -253,8 +257,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3543` - // Minimum execution time: 12_911_000 picoseconds. 
- Weight::from_parts(13_079_000, 3543) + // Minimum execution time: 11_838_000 picoseconds. + Weight::from_parts(12_323_000, 3543) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index 50afac933f119..1d583d0892f9c 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -10,20 +10,21 @@ description = "FRAME Scheduler pallet" readme = "README.md" [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-weights = { version = "4.0.0", default-features = false, path = "../../primitives/weights" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-weights = { version = "20.0.0", default-features = false, path = "../../primitives/weights" } +docify = "0.2.1" [dev-dependencies] pallet-preimage = { version = "4.0.0-dev", path = 
"../preimage" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } [features] @@ -32,6 +33,9 @@ runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "pallet-preimage/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" ] std = [ "codec/std", @@ -44,5 +48,12 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-weights/std", + "pallet-preimage/std", + "sp-core/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-preimage/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index d56e007ec9a2a..b41cea449654c 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -23,7 +23,7 @@ use frame_support::{ ensure, traits::{schedule::Priority, BoundedInline}, }; -use frame_system::RawOrigin; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use sp_std::{prelude::*, vec}; use crate::Pallet as Scheduler; @@ -42,7 +42,10 @@ type SystemOrigin = ::RuntimeOrigin; /// - `None`: aborted (hash without preimage) /// - `Some(true)`: hash resolves into call if possible, plain call otherwise /// - `Some(false)`: plain call -fn fill_schedule(when: T::BlockNumber, n: u32) -> Result<(), &'static str> { +fn fill_schedule( + when: frame_system::pallet_prelude::BlockNumberFor, + n: u32, +) -> Result<(), &'static str> { let t = DispatchTime::At(when); let origin: ::PalletsOrigin = frame_system::RawOrigin::Root.into(); for i in 0..n { @@ -125,7 +128,7 @@ fn make_origin(signed: bool) -> ::PalletsOrigin { benchmarks! { // `service_agendas` when no work is done. 
service_agendas_base { - let now = T::BlockNumber::from(BLOCK_NUMBER); + let now = BlockNumberFor::::from(BLOCK_NUMBER); IncompleteSince::::put(now - One::one()); }: { Scheduler::::service_agendas(&mut WeightMeter::max_limit(), now, 0); @@ -224,7 +227,7 @@ schedule { let s in 0 .. (T::MaxScheduledPerBlock::get() - 1); let when = BLOCK_NUMBER.into(); - let periodic = Some((T::BlockNumber::one(), 100)); + let periodic = Some((BlockNumberFor::::one(), 100)); let priority = 0; // Essentially a no-op call. let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); @@ -267,7 +270,7 @@ let s in 0 .. (T::MaxScheduledPerBlock::get() - 1); let id = u32_to_name(s); let when = BLOCK_NUMBER.into(); - let periodic = Some((T::BlockNumber::one(), 100)); + let periodic = Some((BlockNumberFor::::one(), 100)); let priority = 0; // Essentially a no-op call. let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 8194f286c8323..3538331bbd4ca 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -15,37 +15,63 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Scheduler -//! A Pallet for scheduling dispatches. +//! > Made with *Substrate*, for *Polkadot*. //! -//! - [`Config`] -//! - [`Call`] -//! - [`Pallet`] +//! [![github]](https://github.com/paritytech/substrate/frame/scheduler) - +//! [![polkadot]](https://polkadot.network) +//! +//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! +//! # Scheduler Pallet +//! +//! A Pallet for scheduling runtime calls. //! //! ## Overview //! -//! This Pallet exposes capabilities for scheduling dispatches to occur at a -//! 
specified block number or at a specified period. These scheduled dispatches -//! may be named or anonymous and may be canceled. +//! This Pallet exposes capabilities for scheduling runtime calls to occur at a specified block +//! number or at a specified period. These scheduled runtime calls may be named or anonymous and may +//! be canceled. +//! +//! __NOTE:__ Instead of using the filter contained in the origin to call `fn schedule`, scheduled +//! runtime calls will be dispatched with the default filter for the origin: namely +//! `frame_system::Config::BaseCallFilter` for all origin types (except root which will get no +//! filter). +//! +//! If a call is scheduled using proxy or whatever mechanism which adds filter, then those filters +//! will not be used when dispatching the scheduled runtime call. +//! +//! ### Examples //! -//! **NOTE:** The scheduled calls will be dispatched with the default filter -//! for the origin: namely `frame_system::Config::BaseCallFilter` for all origin -//! except root which will get no filter. And not the filter contained in origin -//! use to call `fn schedule`. +//! 1. Scheduling a runtime call at a specific block. +#![doc = docify::embed!("src/tests.rs", basic_scheduling_works)] //! -//! If a call is scheduled using proxy or whatever mecanism which adds filter, -//! then those filter will not be used when dispatching the schedule call. +//! 2. Scheduling a preimage hash of a runtime call at a specific block +#![doc = docify::embed!("src/tests.rs", scheduling_with_preimages_works)] + +//! +//! ## Pallet API +//! +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, +//! including its configuration trait, dispatchables, storage items, events and errors. //! -//! ## Interface +//! ## Warning //! -//! ### Dispatchable Functions +//! This Pallet executes all scheduled runtime calls in the [`on_initialize`] hook. Do not execute +//! any runtime calls which should not be considered mandatory. 
//! -//! * `schedule` - schedule a dispatch, which may be periodic, to occur at a specified block and -//! with a specified priority. -//! * `cancel` - cancel a scheduled dispatch, specified by block number and index. -//! * `schedule_named` - augments the `schedule` interface with an additional `Vec` parameter -//! that can be used for identification. -//! * `cancel_named` - the named complement to the cancel function. +//! Please be aware that any scheduled runtime calls executed in a future block may __fail__ or may +//! result in __undefined behavior__ since the runtime could have upgraded between the time of +//! scheduling and execution. For example, the runtime upgrade could have: +//! +//! * Modified the implementation of the runtime call (runtime specification upgrade). +//! * Could lead to undefined behavior. +//! * Removed or changed the ordering/index of the runtime call. +//! * Could fail due to the runtime call index not being part of the `Call`. +//! * Could lead to undefined behavior, such as executing another runtime call with the same +//! index. +//! +//! [`on_initialize`]: frame_support::traits::Hooks::on_initialize // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] @@ -72,7 +98,10 @@ use frame_support::{ }, weights::{Weight, WeightMeter}, }; -use frame_system::{self as system}; +use frame_system::{ + pallet_prelude::BlockNumberFor, + {self as system}, +}; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::{ @@ -123,7 +152,7 @@ use crate::{Scheduled as ScheduledV3, Scheduled as ScheduledV2}; pub type ScheduledV2Of = ScheduledV2< Vec, ::RuntimeCall, - ::BlockNumber, + BlockNumberFor, ::PalletsOrigin, ::AccountId, >; @@ -131,7 +160,7 @@ pub type ScheduledV2Of = ScheduledV2< pub type ScheduledV3Of = ScheduledV3< Vec, CallOrHashOf, - ::BlockNumber, + BlockNumberFor, ::PalletsOrigin, ::AccountId, >; @@ -139,7 +168,7 @@ pub type ScheduledV3Of = ScheduledV3< pub type ScheduledOf = Scheduled< TaskName, Bounded<::RuntimeCall>, - ::BlockNumber, + BlockNumberFor, ::PalletsOrigin, ::AccountId, >; @@ -231,14 +260,14 @@ pub mod pallet { } #[pallet::storage] - pub type IncompleteSince = StorageValue<_, T::BlockNumber>; + pub type IncompleteSince = StorageValue<_, BlockNumberFor>; /// Items to be executed, indexed by the block number that they should be executed on. #[pallet::storage] pub type Agenda = StorageMap< _, Twox64Concat, - T::BlockNumber, + BlockNumberFor, BoundedVec>, T::MaxScheduledPerBlock>, ValueQuery, >; @@ -249,28 +278,28 @@ pub mod pallet { /// identities. #[pallet::storage] pub(crate) type Lookup = - StorageMap<_, Twox64Concat, TaskName, TaskAddress>; + StorageMap<_, Twox64Concat, TaskName, TaskAddress>>; /// Events type. #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Scheduled some task. - Scheduled { when: T::BlockNumber, index: u32 }, + Scheduled { when: BlockNumberFor, index: u32 }, /// Canceled some task. - Canceled { when: T::BlockNumber, index: u32 }, + Canceled { when: BlockNumberFor, index: u32 }, /// Dispatched some task. 
Dispatched { - task: TaskAddress, + task: TaskAddress>, id: Option, result: DispatchResult, }, /// The call for the provided hash was not found so the task has been aborted. - CallUnavailable { task: TaskAddress, id: Option }, + CallUnavailable { task: TaskAddress>, id: Option }, /// The given task was unable to be renewed since the agenda is full at that block. - PeriodicFailed { task: TaskAddress, id: Option }, + PeriodicFailed { task: TaskAddress>, id: Option }, /// The given task can never be executed since it is overweight. - PermanentlyOverweight { task: TaskAddress, id: Option }, + PermanentlyOverweight { task: TaskAddress>, id: Option }, } #[pallet::error] @@ -290,10 +319,10 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { /// Execute the scheduled calls - fn on_initialize(now: T::BlockNumber) -> Weight { + fn on_initialize(now: BlockNumberFor) -> Weight { let mut weight_counter = WeightMeter::from_limit(T::MaximumWeight::get()); Self::service_agendas(&mut weight_counter, now, u32::max_value()); - weight_counter.consumed + weight_counter.consumed() } } @@ -304,8 +333,8 @@ pub mod pallet { #[pallet::weight(::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))] pub fn schedule( origin: OriginFor, - when: T::BlockNumber, - maybe_periodic: Option>, + when: BlockNumberFor, + maybe_periodic: Option>>, priority: schedule::Priority, call: Box<::RuntimeCall>, ) -> DispatchResult { @@ -324,7 +353,7 @@ pub mod pallet { /// Cancel an anonymously scheduled task. 
#[pallet::call_index(1)] #[pallet::weight(::WeightInfo::cancel(T::MaxScheduledPerBlock::get()))] - pub fn cancel(origin: OriginFor, when: T::BlockNumber, index: u32) -> DispatchResult { + pub fn cancel(origin: OriginFor, when: BlockNumberFor, index: u32) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::RuntimeOrigin::from(origin); Self::do_cancel(Some(origin.caller().clone()), (when, index))?; @@ -337,8 +366,8 @@ pub mod pallet { pub fn schedule_named( origin: OriginFor, id: TaskName, - when: T::BlockNumber, - maybe_periodic: Option>, + when: BlockNumberFor, + maybe_periodic: Option>>, priority: schedule::Priority, call: Box<::RuntimeCall>, ) -> DispatchResult { @@ -370,8 +399,8 @@ pub mod pallet { #[pallet::weight(::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))] pub fn schedule_after( origin: OriginFor, - after: T::BlockNumber, - maybe_periodic: Option>, + after: BlockNumberFor, + maybe_periodic: Option>>, priority: schedule::Priority, call: Box<::RuntimeCall>, ) -> DispatchResult { @@ -393,8 +422,8 @@ pub mod pallet { pub fn schedule_named_after( origin: OriginFor, id: TaskName, - after: T::BlockNumber, - maybe_periodic: Option>, + after: BlockNumberFor, + maybe_periodic: Option>>, priority: schedule::Priority, call: Box<::RuntimeCall>, ) -> DispatchResult { @@ -434,7 +463,7 @@ impl> Pallet { } Agenda::::translate::< - Vec::RuntimeCall, T::BlockNumber>>>, + Vec::RuntimeCall, BlockNumberFor>>>, _, >(|_, agenda| { Some(BoundedVec::truncate_from( @@ -669,7 +698,7 @@ impl Pallet { Scheduled< TaskName, Bounded<::RuntimeCall>, - T::BlockNumber, + BlockNumberFor, OldOrigin, T::AccountId, >, @@ -695,7 +724,9 @@ impl Pallet { }); } - fn resolve_time(when: DispatchTime) -> Result { + fn resolve_time( + when: DispatchTime>, + ) -> Result, DispatchError> { let now = frame_system::Pallet::::block_number(); let when = match when { @@ -713,9 +744,9 @@ impl Pallet { } fn place_task( - when: T::BlockNumber, + when: BlockNumberFor, 
what: ScheduledOf, - ) -> Result, (DispatchError, ScheduledOf)> { + ) -> Result>, (DispatchError, ScheduledOf)> { let maybe_name = what.maybe_id; let index = Self::push_to_agenda(when, what)?; let address = (when, index); @@ -727,7 +758,7 @@ impl Pallet { } fn push_to_agenda( - when: T::BlockNumber, + when: BlockNumberFor, what: ScheduledOf, ) -> Result)> { let mut agenda = Agenda::::get(when); @@ -749,7 +780,7 @@ impl Pallet { /// Remove trailing `None` items of an agenda at `when`. If all items are `None` remove the /// agenda record entirely. - fn cleanup_agenda(when: T::BlockNumber) { + fn cleanup_agenda(when: BlockNumberFor) { let mut agenda = Agenda::::get(when); match agenda.iter().rposition(|i| i.is_some()) { Some(i) if agenda.len() > i + 1 => { @@ -764,12 +795,12 @@ impl Pallet { } fn do_schedule( - when: DispatchTime, - maybe_periodic: Option>, + when: DispatchTime>, + maybe_periodic: Option>>, priority: schedule::Priority, origin: T::PalletsOrigin, call: Bounded<::RuntimeCall>, - ) -> Result, DispatchError> { + ) -> Result>, DispatchError> { let when = Self::resolve_time(when)?; let lookup_hash = call.lookup_hash(); @@ -799,7 +830,7 @@ impl Pallet { fn do_cancel( origin: Option, - (when, index): TaskAddress, + (when, index): TaskAddress>, ) -> Result<(), DispatchError> { let scheduled = Agenda::::try_mutate(when, |agenda| { agenda.get_mut(index as usize).map_or( @@ -831,9 +862,9 @@ impl Pallet { } fn do_reschedule( - (when, index): TaskAddress, - new_time: DispatchTime, - ) -> Result, DispatchError> { + (when, index): TaskAddress>, + new_time: DispatchTime>, + ) -> Result>, DispatchError> { let new_time = Self::resolve_time(new_time)?; if new_time == when { @@ -853,12 +884,12 @@ impl Pallet { fn do_schedule_named( id: TaskName, - when: DispatchTime, - maybe_periodic: Option>, + when: DispatchTime>, + maybe_periodic: Option>>, priority: schedule::Priority, origin: T::PalletsOrigin, call: Bounded<::RuntimeCall>, - ) -> Result, DispatchError> { + ) -> 
Result>, DispatchError> { // ensure id it is unique if Lookup::::contains_key(&id) { return Err(Error::::FailedToSchedule.into()) @@ -922,8 +953,8 @@ impl Pallet { fn do_reschedule_named( id: TaskName, - new_time: DispatchTime, - ) -> Result, DispatchError> { + new_time: DispatchTime>, + ) -> Result>, DispatchError> { let new_time = Self::resolve_time(new_time)?; let lookup = Lookup::::get(id); @@ -953,8 +984,8 @@ use ServiceTaskError::*; impl Pallet { /// Service up to `max` agendas queue starting from earliest incompletely executed agenda. - fn service_agendas(weight: &mut WeightMeter, now: T::BlockNumber, max: u32) { - if !weight.check_accrue(T::WeightInfo::service_agendas_base()) { + fn service_agendas(weight: &mut WeightMeter, now: BlockNumberFor, max: u32) { + if weight.try_consume(T::WeightInfo::service_agendas_base()).is_err() { return } @@ -965,7 +996,7 @@ impl Pallet { let max_items = T::MaxScheduledPerBlock::get(); let mut count_down = max; let service_agenda_base_weight = T::WeightInfo::service_agenda_base(max_items); - while count_down > 0 && when <= now && weight.can_accrue(service_agenda_base_weight) { + while count_down > 0 && when <= now && weight.can_consume(service_agenda_base_weight) { if !Self::service_agenda(weight, &mut executed, now, when, u32::max_value()) { incomplete_since = incomplete_since.min(when); } @@ -983,8 +1014,8 @@ impl Pallet { fn service_agenda( weight: &mut WeightMeter, executed: &mut u32, - now: T::BlockNumber, - when: T::BlockNumber, + now: BlockNumberFor, + when: BlockNumberFor, max: u32, ) -> bool { let mut agenda = Agenda::::get(when); @@ -996,8 +1027,9 @@ impl Pallet { }) .collect::>(); ordered.sort_by_key(|k| k.1); - let within_limit = - weight.check_accrue(T::WeightInfo::service_agenda_base(ordered.len() as u32)); + let within_limit = weight + .try_consume(T::WeightInfo::service_agenda_base(ordered.len() as u32)) + .is_ok(); debug_assert!(within_limit, "weight limit should have been checked in advance"); // Items 
which we know can be executed and have postponed for execution in a later block. @@ -1015,7 +1047,7 @@ impl Pallet { task.maybe_id.is_some(), task.maybe_periodic.is_some(), ); - if !weight.can_accrue(base_weight) { + if !weight.can_consume(base_weight) { postponed += 1; break } @@ -1052,8 +1084,8 @@ impl Pallet { /// - Rescheduling the task for execution in a later agenda if periodic. fn service_task( weight: &mut WeightMeter, - now: T::BlockNumber, - when: T::BlockNumber, + now: BlockNumberFor, + when: BlockNumberFor, agenda_index: u32, is_first: bool, mut task: ScheduledOf, @@ -1067,7 +1099,7 @@ impl Pallet { Err(_) => return Err((Unavailable, Some(task))), }; - weight.check_accrue(T::WeightInfo::service_task( + let _ = weight.try_consume(T::WeightInfo::service_task( lookup_len.map(|x| x as usize), task.maybe_id.is_some(), task.maybe_periodic.is_some(), @@ -1143,7 +1175,7 @@ impl Pallet { // We only allow a scheduled call if it cannot push the weight past the limit. let max_weight = base_weight.saturating_add(call_weight); - if !weight.can_accrue(max_weight) { + if !weight.can_consume(max_weight) { return Err(Overweight) } @@ -1154,21 +1186,21 @@ impl Pallet { (error_and_info.post_info.actual_weight, Err(error_and_info.error)), }; let call_weight = maybe_actual_call_weight.unwrap_or(call_weight); - weight.check_accrue(base_weight); - weight.check_accrue(call_weight); + let _ = weight.try_consume(base_weight); + let _ = weight.try_consume(call_weight); Ok(result) } } impl> - schedule::v2::Anon::RuntimeCall, T::PalletsOrigin> for Pallet + schedule::v2::Anon, ::RuntimeCall, T::PalletsOrigin> for Pallet { - type Address = TaskAddress; + type Address = TaskAddress>; type Hash = T::Hash; fn schedule( - when: DispatchTime, - maybe_periodic: Option>, + when: DispatchTime>, + maybe_periodic: Option>>, priority: schedule::Priority, origin: T::PalletsOrigin, call: CallOrHashOf, @@ -1184,26 +1216,26 @@ impl> fn reschedule( address: Self::Address, - when: DispatchTime, + 
when: DispatchTime>, ) -> Result { Self::do_reschedule(address, when) } - fn next_dispatch_time((when, index): Self::Address) -> Result { + fn next_dispatch_time((when, index): Self::Address) -> Result, ()> { Agenda::::get(when).get(index as usize).ok_or(()).map(|_| when) } } impl> - schedule::v2::Named::RuntimeCall, T::PalletsOrigin> for Pallet + schedule::v2::Named, ::RuntimeCall, T::PalletsOrigin> for Pallet { - type Address = TaskAddress; + type Address = TaskAddress>; type Hash = T::Hash; fn schedule_named( id: Vec, - when: DispatchTime, - maybe_periodic: Option>, + when: DispatchTime>, + maybe_periodic: Option>>, priority: schedule::Priority, origin: T::PalletsOrigin, call: CallOrHashOf, @@ -1221,13 +1253,13 @@ impl> fn reschedule_named( id: Vec, - when: DispatchTime, + when: DispatchTime>, ) -> Result { let name = blake2_256(&id[..]); Self::do_reschedule_named(name, when) } - fn next_dispatch_time(id: Vec) -> Result { + fn next_dispatch_time(id: Vec) -> Result, ()> { let name = blake2_256(&id[..]); Lookup::::get(name) .and_then(|(when, index)| Agenda::::get(when).get(index as usize).map(|_| when)) @@ -1235,14 +1267,14 @@ impl> } } -impl schedule::v3::Anon::RuntimeCall, T::PalletsOrigin> +impl schedule::v3::Anon, ::RuntimeCall, T::PalletsOrigin> for Pallet { - type Address = TaskAddress; + type Address = TaskAddress>; fn schedule( - when: DispatchTime, - maybe_periodic: Option>, + when: DispatchTime>, + maybe_periodic: Option>>, priority: schedule::Priority, origin: T::PalletsOrigin, call: Bounded<::RuntimeCall>, @@ -1256,12 +1288,14 @@ impl schedule::v3::Anon::RuntimeCall, T fn reschedule( address: Self::Address, - when: DispatchTime, + when: DispatchTime>, ) -> Result { Self::do_reschedule(address, when).map_err(map_err_to_v3_err::) } - fn next_dispatch_time((when, index): Self::Address) -> Result { + fn next_dispatch_time( + (when, index): Self::Address, + ) -> Result, DispatchError> { Agenda::::get(when) .get(index as usize) 
.ok_or(DispatchError::Unavailable) @@ -1271,15 +1305,15 @@ impl schedule::v3::Anon::RuntimeCall, T use schedule::v3::TaskName; -impl schedule::v3::Named::RuntimeCall, T::PalletsOrigin> +impl schedule::v3::Named, ::RuntimeCall, T::PalletsOrigin> for Pallet { - type Address = TaskAddress; + type Address = TaskAddress>; fn schedule_named( id: TaskName, - when: DispatchTime, - maybe_periodic: Option>, + when: DispatchTime>, + maybe_periodic: Option>>, priority: schedule::Priority, origin: T::PalletsOrigin, call: Bounded<::RuntimeCall>, @@ -1293,12 +1327,12 @@ impl schedule::v3::Named::RuntimeCall, fn reschedule_named( id: TaskName, - when: DispatchTime, + when: DispatchTime>, ) -> Result { Self::do_reschedule_named(id, when).map_err(map_err_to_v3_err::) } - fn next_dispatch_time(id: TaskName) -> Result { + fn next_dispatch_time(id: TaskName) -> Result, DispatchError> { Lookup::::get(id) .and_then(|(when, index)| Agenda::::get(when).get(index as usize).map(|_| when)) .ok_or(DispatchError::Unavailable) diff --git a/frame/scheduler/src/migration.rs b/frame/scheduler/src/migration.rs index 5b3a7631eeac9..06259768f0aa1 100644 --- a/frame/scheduler/src/migration.rs +++ b/frame/scheduler/src/migration.rs @@ -19,6 +19,10 @@ use super::*; use frame_support::traits::OnRuntimeUpgrade; +use frame_system::pallet_prelude::BlockNumberFor; + +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; /// The log target. 
const TARGET: &'static str = "runtime::scheduler::migration"; @@ -31,22 +35,14 @@ pub mod v1 { pub(crate) type Agenda = StorageMap< Pallet, Twox64Concat, - ::BlockNumber, - Vec< - Option< - ScheduledV1<::RuntimeCall, ::BlockNumber>, - >, - >, + BlockNumberFor, + Vec::RuntimeCall, BlockNumberFor>>>, ValueQuery, >; #[frame_support::storage_alias] - pub(crate) type Lookup = StorageMap< - Pallet, - Twox64Concat, - Vec, - TaskAddress<::BlockNumber>, - >; + pub(crate) type Lookup = + StorageMap, Twox64Concat, Vec, TaskAddress>>; } pub mod v2 { @@ -57,18 +53,14 @@ pub mod v2 { pub(crate) type Agenda = StorageMap< Pallet, Twox64Concat, - ::BlockNumber, + BlockNumberFor, Vec>>, ValueQuery, >; #[frame_support::storage_alias] - pub(crate) type Lookup = StorageMap< - Pallet, - Twox64Concat, - Vec, - TaskAddress<::BlockNumber>, - >; + pub(crate) type Lookup = + StorageMap, Twox64Concat, Vec, TaskAddress>>; } pub mod v3 { @@ -79,26 +71,22 @@ pub mod v3 { pub(crate) type Agenda = StorageMap< Pallet, Twox64Concat, - ::BlockNumber, + BlockNumberFor, Vec>>, ValueQuery, >; #[frame_support::storage_alias] - pub(crate) type Lookup = StorageMap< - Pallet, - Twox64Concat, - Vec, - TaskAddress<::BlockNumber>, - >; + pub(crate) type Lookup = + StorageMap, Twox64Concat, Vec, TaskAddress>>; /// Migrate the scheduler pallet from V3 to V4. 
pub struct MigrateToV4(sp_std::marker::PhantomData); impl> OnRuntimeUpgrade for MigrateToV4 { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - assert_eq!(StorageVersion::get::>(), 3, "Can only upgrade from version 3"); + fn pre_upgrade() -> Result, TryRuntimeError> { + ensure!(StorageVersion::get::>() == 3, "Can only upgrade from version 3"); let agendas = Agenda::::iter_keys().count() as u32; let decodable_agendas = Agenda::::iter_values().count() as u32; @@ -125,7 +113,7 @@ pub mod v3 { agenda.len(), max_scheduled_per_block, ); - return Err("Agenda would overflow `MaxScheduledPerBlock`.") + return Err("Agenda would overflow `MaxScheduledPerBlock`.".into()) } } // Check that bounding the calls will not overflow `MAX_LENGTH`. @@ -142,7 +130,7 @@ pub mod v3 { block_number, l, ); - return Err("Call is too large.") + return Err("Call is too large.".into()) } }, _ => (), @@ -169,12 +157,12 @@ pub mod v3 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - assert_eq!(StorageVersion::get::>(), 4, "Must upgrade"); + fn post_upgrade(state: Vec) -> Result<(), TryRuntimeError> { + ensure!(StorageVersion::get::>() == 4, "Must upgrade"); // Check that everything decoded fine. 
for k in crate::Agenda::::iter_keys() { - assert!(crate::Agenda::::try_get(k).is_ok(), "Cannot decode V4 Agenda"); + ensure!(crate::Agenda::::try_get(k).is_ok(), "Cannot decode V4 Agenda"); } let old_agendas: u32 = @@ -210,7 +198,7 @@ pub mod v4 { impl OnRuntimeUpgrade for CleanupAgendas { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { assert_eq!( StorageVersion::get::>(), 4, @@ -285,8 +273,8 @@ pub mod v4 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - assert_eq!(StorageVersion::get::>(), 4, "Version must not change"); + fn post_upgrade(state: Vec) -> Result<(), TryRuntimeError> { + ensure!(StorageVersion::get::>() == 4, "Version must not change"); let (old_agendas, non_empty_agendas): (u32, u32) = Decode::decode(&mut state.as_ref()).expect("Must decode pre_upgrade state"); @@ -305,7 +293,7 @@ pub mod v4 { old_agendas, new_agendas ), } - assert_eq!(new_agendas, non_empty_agendas, "Expected to keep all non-empty agendas"); + ensure!(new_agendas == non_empty_agendas, "Expected to keep all non-empty agendas"); Ok(()) } @@ -496,7 +484,7 @@ mod test { // The pre_upgrade hook fails: let err = v3::MigrateToV4::::pre_upgrade().unwrap_err(); - assert!(err.contains("Call is too large")); + assert_eq!(DispatchError::from("Call is too large."), err); // But the migration itself works: let _w = v3::MigrateToV4::::on_runtime_upgrade(); diff --git a/frame/scheduler/src/mock.rs b/frame/scheduler/src/mock.rs index adb54fd78b181..28e334958d924 100644 --- a/frame/scheduler/src/mock.rs +++ b/frame/scheduler/src/mock.rs @@ -30,9 +30,8 @@ use frame_support::{ use frame_system::{EnsureRoot, EnsureSignedBy}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, - Perbill, + BuildStorage, Perbill, }; // Logger module to track execution. 
@@ -50,7 +49,7 @@ pub mod logger { } #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::hooks] impl Hooks> for Pallet {} @@ -93,16 +92,12 @@ pub mod logger { } } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Logger: logger::{Pallet, Call, Event}, Scheduler: scheduler::{Pallet, Call, Storage, Event}, Preimage: pallet_preimage::{Pallet, Call, Storage, Event}, @@ -130,13 +125,12 @@ impl system::Config for Test { type DbWeight = RocksDbWeight; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -225,7 +219,7 @@ impl Config for Test { pub type LoggerCall = logger::Call; pub fn new_test_ext() -> sp_io::TestExternalities { - let t = system::GenesisConfig::default().build_storage::().unwrap(); + let t = system::GenesisConfig::::default().build_storage().unwrap(); t.into() } diff --git a/frame/scheduler/src/tests.rs b/frame/scheduler/src/tests.rs index a0cac897d43df..477df5579dcf1 100644 --- a/frame/scheduler/src/tests.rs +++ b/frame/scheduler/src/tests.rs @@ -30,11 +30,18 @@ use sp_runtime::traits::Hash; use substrate_test_utils::assert_eq_uvec; #[test] +#[docify::export] fn basic_scheduling_works() { new_test_ext().execute_with(|| { + // Call to schedule let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_parts(10, 0) }); + + // BaseCallFilter 
should be implemented to accept `Logger::log` runtime call which is + // implemented for `BaseFilter` in the mock runtime assert!(!::BaseCallFilter::contains(&call)); + + // Schedule call to be executed at the 4th block assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, @@ -42,33 +49,53 @@ fn basic_scheduling_works() { root(), Preimage::bound(call).unwrap() )); + + // `log` runtime call should not have executed yet run_to_block(3); assert!(logger::log().is_empty()); + run_to_block(4); + // `log` runtime call should have executed at block 4 assert_eq!(logger::log(), vec![(root(), 42u32)]); + run_to_block(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); } #[test] +#[docify::export] fn scheduling_with_preimages_works() { new_test_ext().execute_with(|| { + // Call to schedule let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_parts(10, 0) }); + let hash = ::Hashing::hash_of(&call); let len = call.using_encoded(|x| x.len()) as u32; - // Important to use here `Bounded::Lookup` to ensure that we request the hash. 
+ + // Important to use here `Bounded::Lookup` to ensure that that the Scheduler can request the + // hash from PreImage to dispatch the call let hashed = Bounded::Lookup { hash, len }; + + // Schedule call to be executed at block 4 with the PreImage hash assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), hashed)); + + // Register preimage on chain assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); assert!(Preimage::is_requested(&hash)); + + // `log` runtime call should not have executed yet run_to_block(3); assert!(logger::log().is_empty()); + run_to_block(4); + // preimage should not have been removed when executed by the scheduler assert!(!Preimage::len(&hash).is_some()); assert!(!Preimage::is_requested(&hash)); + // `log` runtime call should have executed at block 4 assert_eq!(logger::log(), vec![(root(), 42u32)]); + run_to_block(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs index 897363f134e7b..58d711862591d 100644 --- a/frame/scheduler/src/weights.rs +++ b/frame/scheduler/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_scheduler //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_scheduler +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_scheduler. pub trait WeightInfo { @@ -71,8 +75,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `31` // Estimated: `1489` - // Minimum execution time: 3_776_000 picoseconds. - Weight::from_parts(3_992_000, 1489) + // Minimum execution time: 3_991_000 picoseconds. + Weight::from_parts(4_174_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -83,10 +87,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 3_418_000 picoseconds. - Weight::from_parts(8_606_012, 110487) - // Standard Error: 769 - .saturating_add(Weight::from_parts(309_376, 0).saturating_mul(s.into())) + // Minimum execution time: 3_581_000 picoseconds. + Weight::from_parts(7_413_174, 110487) + // Standard Error: 971 + .saturating_add(Weight::from_parts(348_077, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -94,8 +98,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_624_000 picoseconds. - Weight::from_parts(5_758_000, 0) + // Minimum execution time: 5_250_000 picoseconds. 
+ Weight::from_parts(5_549_000, 0) } /// Storage: Preimage PreimageFor (r:1 w:1) /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured) @@ -106,10 +110,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `179 + s * (1 ±0)` // Estimated: `3644 + s * (1 ±0)` - // Minimum execution time: 20_150_000 picoseconds. - Weight::from_parts(20_271_000, 3644) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_132, 0).saturating_mul(s.into())) + // Minimum execution time: 20_089_000 picoseconds. + Weight::from_parts(20_376_000, 3644) + // Standard Error: 3 + .saturating_add(Weight::from_parts(1_170, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) @@ -120,30 +124,30 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_451_000 picoseconds. - Weight::from_parts(7_693_000, 0) + // Minimum execution time: 6_998_000 picoseconds. + Weight::from_parts(7_303_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } fn service_task_periodic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_477_000 picoseconds. - Weight::from_parts(5_733_000, 0) + // Minimum execution time: 5_078_000 picoseconds. + Weight::from_parts(5_315_000, 0) } fn execute_dispatch_signed() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_675_000 picoseconds. - Weight::from_parts(2_870_000, 0) + // Minimum execution time: 2_228_000 picoseconds. + Weight::from_parts(2_352_000, 0) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_697_000 picoseconds. 
- Weight::from_parts(2_807_000, 0) + // Minimum execution time: 2_226_000 picoseconds. + Weight::from_parts(2_371_000, 0) } /// Storage: Scheduler Agenda (r:1 w:1) /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) @@ -152,10 +156,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 13_921_000 picoseconds. - Weight::from_parts(18_717_223, 110487) - // Standard Error: 771 - .saturating_add(Weight::from_parts(333_102, 0).saturating_mul(s.into())) + // Minimum execution time: 12_683_000 picoseconds. + Weight::from_parts(16_951_846, 110487) + // Standard Error: 1_046 + .saturating_add(Weight::from_parts(380_842, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -168,10 +172,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 17_552_000 picoseconds. - Weight::from_parts(19_006_016, 110487) - // Standard Error: 1_115 - .saturating_add(Weight::from_parts(495_979, 0).saturating_mul(s.into())) + // Minimum execution time: 16_201_000 picoseconds. + Weight::from_parts(18_259_422, 110487) + // Standard Error: 1_344 + .saturating_add(Weight::from_parts(545_863, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -184,10 +188,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `596 + s * (178 ±0)` // Estimated: `110487` - // Minimum execution time: 17_240_000 picoseconds. - Weight::from_parts(24_376_370, 110487) - // Standard Error: 928 - .saturating_add(Weight::from_parts(331_209, 0).saturating_mul(s.into())) + // Minimum execution time: 16_180_000 picoseconds. 
+ Weight::from_parts(25_128_925, 110487) + // Standard Error: 1_118 + .saturating_add(Weight::from_parts(375_631, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -200,10 +204,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `709 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 19_731_000 picoseconds. - Weight::from_parts(23_787_948, 110487) - // Standard Error: 1_133 - .saturating_add(Weight::from_parts(503_805, 0).saturating_mul(s.into())) + // Minimum execution time: 18_244_000 picoseconds. + Weight::from_parts(21_439_366, 110487) + // Standard Error: 1_084 + .saturating_add(Weight::from_parts(557_691, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -217,8 +221,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `31` // Estimated: `1489` - // Minimum execution time: 3_776_000 picoseconds. - Weight::from_parts(3_992_000, 1489) + // Minimum execution time: 3_991_000 picoseconds. + Weight::from_parts(4_174_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -229,10 +233,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 3_418_000 picoseconds. - Weight::from_parts(8_606_012, 110487) - // Standard Error: 769 - .saturating_add(Weight::from_parts(309_376, 0).saturating_mul(s.into())) + // Minimum execution time: 3_581_000 picoseconds. 
+ Weight::from_parts(7_413_174, 110487) + // Standard Error: 971 + .saturating_add(Weight::from_parts(348_077, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -240,8 +244,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_624_000 picoseconds. - Weight::from_parts(5_758_000, 0) + // Minimum execution time: 5_250_000 picoseconds. + Weight::from_parts(5_549_000, 0) } /// Storage: Preimage PreimageFor (r:1 w:1) /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured) @@ -252,10 +256,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `179 + s * (1 ±0)` // Estimated: `3644 + s * (1 ±0)` - // Minimum execution time: 20_150_000 picoseconds. - Weight::from_parts(20_271_000, 3644) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_132, 0).saturating_mul(s.into())) + // Minimum execution time: 20_089_000 picoseconds. + Weight::from_parts(20_376_000, 3644) + // Standard Error: 3 + .saturating_add(Weight::from_parts(1_170, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) @@ -266,30 +270,30 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_451_000 picoseconds. - Weight::from_parts(7_693_000, 0) + // Minimum execution time: 6_998_000 picoseconds. + Weight::from_parts(7_303_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn service_task_periodic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_477_000 picoseconds. - Weight::from_parts(5_733_000, 0) + // Minimum execution time: 5_078_000 picoseconds. 
+ Weight::from_parts(5_315_000, 0) } fn execute_dispatch_signed() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_675_000 picoseconds. - Weight::from_parts(2_870_000, 0) + // Minimum execution time: 2_228_000 picoseconds. + Weight::from_parts(2_352_000, 0) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_697_000 picoseconds. - Weight::from_parts(2_807_000, 0) + // Minimum execution time: 2_226_000 picoseconds. + Weight::from_parts(2_371_000, 0) } /// Storage: Scheduler Agenda (r:1 w:1) /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) @@ -298,10 +302,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 13_921_000 picoseconds. - Weight::from_parts(18_717_223, 110487) - // Standard Error: 771 - .saturating_add(Weight::from_parts(333_102, 0).saturating_mul(s.into())) + // Minimum execution time: 12_683_000 picoseconds. + Weight::from_parts(16_951_846, 110487) + // Standard Error: 1_046 + .saturating_add(Weight::from_parts(380_842, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -314,10 +318,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 17_552_000 picoseconds. - Weight::from_parts(19_006_016, 110487) - // Standard Error: 1_115 - .saturating_add(Weight::from_parts(495_979, 0).saturating_mul(s.into())) + // Minimum execution time: 16_201_000 picoseconds. 
+ Weight::from_parts(18_259_422, 110487) + // Standard Error: 1_344 + .saturating_add(Weight::from_parts(545_863, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -330,10 +334,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `596 + s * (178 ±0)` // Estimated: `110487` - // Minimum execution time: 17_240_000 picoseconds. - Weight::from_parts(24_376_370, 110487) - // Standard Error: 928 - .saturating_add(Weight::from_parts(331_209, 0).saturating_mul(s.into())) + // Minimum execution time: 16_180_000 picoseconds. + Weight::from_parts(25_128_925, 110487) + // Standard Error: 1_118 + .saturating_add(Weight::from_parts(375_631, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -346,10 +350,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `709 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 19_731_000 picoseconds. - Weight::from_parts(23_787_948, 110487) - // Standard Error: 1_133 - .saturating_add(Weight::from_parts(503_805, 0).saturating_mul(s.into())) + // Minimum execution time: 18_244_000 picoseconds. 
+ Weight::from_parts(21_439_366, 110487) + // Standard Error: 1_084 + .saturating_add(Weight::from_parts(557_691, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index f38743e8b5f96..58dd23d241cf8 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } [features] default = ["std"] @@ -35,5 +35,12 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "sp-core/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", 
+ "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index f8fc0fecd492a..2bf70cbc574c8 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -170,7 +170,7 @@ pub mod pallet { /// Every `Period` blocks the `Members` are filled with the highest scoring /// members in the `Pool`. #[pallet::constant] - type Period: Get; + type Period: Get>; /// The receiver of the signal for when the membership has been initialized. /// This happens pre-genesis and will usually be the same as `MembershipChanged`. @@ -256,7 +256,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { + impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) { let mut pool = self.pool.clone(); @@ -282,7 +282,7 @@ pub mod pallet { impl, I: 'static> Hooks> for Pallet { /// Every `Period` blocks the `Members` set is refreshed from the /// highest scoring members in the pool. 
- fn on_initialize(n: T::BlockNumber) -> Weight { + fn on_initialize(n: frame_system::pallet_prelude::BlockNumberFor) -> Weight { if n % T::Period::get() == Zero::zero() { let pool = >::get(); >::refresh_members(pool, ChangeReceiver::MembershipChanged); diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index f10a1320ef83c..d8c6ef9b0f444 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -22,25 +22,21 @@ use crate as pallet_scored_pool; use frame_support::{ bounded_vec, construct_runtime, ord_parameter_types, parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild}, + traits::{ConstU32, ConstU64}, }; use frame_system::EnsureSignedBy; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, ScoredPool: pallet_scored_pool::{Pallet, Call, Storage, Config, Event}, } @@ -60,14 +56,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -93,7 +88,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = 
(); type MaxHolds = (); } @@ -140,7 +135,7 @@ impl Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut balances = vec![]; for i in 1..31 { balances.push((i, 500_000)); diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index ccf3f7d86e497..0858ae6656540 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -13,20 +13,20 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core", features = ["serde"] } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime", features = ["serde"] } sp-session = { 
version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } -sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-trie = { version = "7.0.0", default-features = false, optional = true, path = "../../primitives/trie" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking", features = ["serde"] } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-trie = { version = "22.0.0", default-features = false, optional = true, path = "../../primitives/trie" } [features] default = ["historical", "std"] @@ -46,4 +46,9 @@ std = [ "sp-std/std", "sp-trie/std", ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime" +] diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index 4d1d9b4eda491..a26cacc561e82 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -19,19 +19,19 @@ frame-support = { version = "4.0.0-dev", default-features = false, path = "../.. 
frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../session" } pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../staking" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } scale-info = "2.1.1" frame-election-provider-support = { version = "4.0.0-dev", path = "../../election-provider-support" } pallet-balances = { version = "4.0.0-dev", path = "../../balances" } pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../../staking/reward-curve" } pallet-timestamp = { version = "4.0.0-dev", path = "../../timestamp" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../../primitives/io" } [features] default = ["std"] @@ -45,8 +45,20 @@ std = [ "sp-runtime/std", "sp-session/std", "sp-std/std", + "frame-election-provider-support/std", + "pallet-balances/std", + "pallet-timestamp/std", + "sp-core/std", + "sp-io/std" ] runtime-benchmarks = [ "pallet-staking/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-election-provider-support/runtime-benchmarks", + 
"frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" ] diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index a7e326fb27ac3..cbf5d67ba567c 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -31,11 +31,11 @@ use frame_support::{ codec::Decode, traits::{Get, KeyOwnerProofSystem, OnInitialize}, }; -use frame_system::RawOrigin; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_session::{historical::Pallet as Historical, Pallet as Session, *}; use pallet_staking::{ benchmarking::create_validator_with_nominators, testing_utils::create_validators, - RewardDestination, + MaxNominationsOf, RewardDestination, }; const MAX_VALIDATORS: u32 = 1000; @@ -46,18 +46,18 @@ pub trait Config: { } -impl OnInitialize for Pallet { - fn on_initialize(n: T::BlockNumber) -> frame_support::weights::Weight { +impl OnInitialize> for Pallet { + fn on_initialize(n: BlockNumberFor) -> frame_support::weights::Weight { pallet_session::Pallet::::on_initialize(n) } } benchmarks! { set_keys { - let n = ::MaxNominations::get(); + let n = MaxNominationsOf::::get(); let (v_stash, _) = create_validator_with_nominators::( n, - ::MaxNominations::get(), + MaxNominationsOf::::get(), false, true, RewardDestination::Staked, @@ -72,10 +72,10 @@ benchmarks! 
{ }: _(RawOrigin::Signed(v_controller), keys, proof) purge_keys { - let n = ::MaxNominations::get(); + let n = MaxNominationsOf::::get(); let (v_stash, _) = create_validator_with_nominators::( n, - ::MaxNominations::get(), + MaxNominationsOf::::get(), false, true, RewardDestination::Staked, @@ -156,7 +156,7 @@ fn check_membership_proof_setup( Session::::set_keys(RawOrigin::Signed(controller).into(), keys, proof).unwrap(); } - Pallet::::on_initialize(T::BlockNumber::one()); + Pallet::::on_initialize(frame_system::pallet_prelude::BlockNumberFor::::one()); // skip sessions until the new validator set is enacted while Session::::validators().len() < n as usize { diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index b7671255f68fb..24a821ac87af5 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -19,28 +19,26 @@ #![cfg(test)] -use frame_election_provider_support::{onchain, SequentialPhragmen}; +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, SequentialPhragmen, +}; use frame_support::{ parameter_types, traits::{ConstU32, ConstU64}, }; -use sp_runtime::traits::IdentityLookup; +use sp_runtime::{traits::IdentityLookup, BuildStorage}; type AccountId = u64; -type AccountIndex = u32; -type BlockNumber = u64; +type Nonce = u32; type Balance = u64; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, Session: pallet_session::{Pallet, Call, Storage, Event, 
Config}, @@ -53,14 +51,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = AccountIndex; - type BlockNumber = BlockNumber; + type Nonce = Nonce; type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type Header = sp_runtime::testing::Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); @@ -86,7 +83,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -146,6 +143,7 @@ pallet_staking_reward_curve::build! { } parameter_types! { pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; + pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); } pub struct OnChainSeqPhragmen; @@ -155,16 +153,14 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = (); type MaxWinners = ConstU32<100>; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; + type Bounds = ElectionsBounds; } impl pallet_staking::Config for Test { - type MaxNominations = ConstU32<16>; type Currency = Balances; type CurrencyBalance = ::Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type CurrencyToVote = (); type RewardRemainder = (); type RuntimeEvent = RuntimeEvent; type Slash = (); @@ -184,7 +180,8 @@ impl pallet_staking::Config for Test { type HistoryDepth = ConstU32<84>; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type OnStakerSlash = (); + type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; + type 
EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } @@ -192,6 +189,6 @@ impl pallet_staking::Config for Test { impl crate::Config for Test {} pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); sp_io::TestExternalities::new(t) } diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index f00a1c95e763e..04129e4eba130 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -377,17 +377,17 @@ pub(crate) mod tests { force_new_session, set_next_validators, NextValidators, Session, System, Test, }; - use sp_runtime::{key_types::DUMMY, testing::UintAuthorityId}; + use sp_runtime::{key_types::DUMMY, testing::UintAuthorityId, BuildStorage}; use frame_support::{ - traits::{GenesisBuild, KeyOwnerProofSystem, OnInitialize}, + traits::{KeyOwnerProofSystem, OnInitialize}, BasicExternalities, }; type Historical = Pallet; pub(crate) fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let keys: Vec<_> = NextValidators::get() .iter() .cloned() diff --git a/frame/session/src/historical/offchain.rs b/frame/session/src/historical/offchain.rs index 4185788cbbed0..c44eea2f374c0 100644 --- a/frame/session/src/historical/offchain.rs +++ b/frame/session/src/historical/offchain.rs @@ -149,18 +149,18 @@ mod tests { crypto::key_types::DUMMY, offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt, StorageKind}, }; - use sp_runtime::testing::UintAuthorityId; + use sp_runtime::{testing::UintAuthorityId, BuildStorage}; use frame_support::{ - traits::{GenesisBuild, KeyOwnerProofSystem, OnInitialize}, + traits::{KeyOwnerProofSystem, OnInitialize}, 
BasicExternalities, }; type Historical = Pallet; pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default() - .build_storage::() + let mut t = frame_system::GenesisConfig::::default() + .build_storage() .expect("Failed to create test externalities."); let keys: Vec<_> = NextValidators::get() diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 1219aaaf12a10..d2b1c2b744674 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -126,6 +126,7 @@ use frame_support::{ weights::Weight, Parameter, }; +use frame_system::pallet_prelude::BlockNumberFor; use sp_runtime::{ traits::{AtLeast32BitUnsigned, Convert, Member, One, OpaqueKeys, Zero}, ConsensusEngineId, KeyTypeId, Permill, RuntimeAppPublic, @@ -393,12 +394,12 @@ pub mod pallet { type ValidatorIdOf: Convert>; /// Indicator for when to end the session. - type ShouldEndSession: ShouldEndSession; + type ShouldEndSession: ShouldEndSession>; /// Something that can predict the next session rotation. This should typically come from /// the same logical unit that provides [`ShouldEndSession`], yet, it gives a best effort /// estimate. It is helpful to implement [`EstimateNextNewSession`]. - type NextSessionRotation: EstimateNextSessionRotation; + type NextSessionRotation: EstimateNextSessionRotation>; /// Handler for managing new session. type SessionManager: SessionManager; @@ -420,7 +421,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { if T::SessionHandler::KEY_TYPE_IDS.len() != T::Keys::key_ids().len() { panic!("Number of keys in session handler and session keys does not match"); @@ -559,7 +560,7 @@ pub mod pallet { impl Hooks> for Pallet { /// Called when a block is initialized. Will rotate session if it is the last /// block of the current session. 
- fn on_initialize(n: T::BlockNumber) -> Weight { + fn on_initialize(n: BlockNumberFor) -> Weight { if T::ShouldEndSession::should_end_session(n) { Self::rotate_session(); T::BlockWeights::get().max_block @@ -901,14 +902,14 @@ impl ValidatorSet for Pallet { } } -impl EstimateNextNewSession for Pallet { - fn average_session_length() -> T::BlockNumber { +impl EstimateNextNewSession> for Pallet { + fn average_session_length() -> BlockNumberFor { T::NextSessionRotation::average_session_length() } /// This session pallet always calls new_session and next_session at the same time, hence we /// do a simple proxy and pass the function to next rotation. - fn estimate_next_new_session(now: T::BlockNumber) -> (Option, Weight) { + fn estimate_next_new_session(now: BlockNumberFor) -> (Option>, Weight) { T::NextSessionRotation::estimate_next_session_rotation(now) } } diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index d6b8d9e207e02..b45ebfd25c44a 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -27,14 +27,15 @@ use std::collections::BTreeMap; use sp_core::{crypto::key_types::DUMMY, H256}; use sp_runtime::{ impl_opaque_keys, - testing::{Header, UintAuthorityId}, + testing::UintAuthorityId, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; use sp_staking::SessionIndex; use frame_support::{ parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild}, + traits::{ConstU32, ConstU64}, BasicExternalities, }; @@ -75,17 +76,13 @@ impl OpaqueKeys for PreUpgradeMockSessionKeys { } } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; #[cfg(feature = "historical")] frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Session: 
pallet_session::{Pallet, Call, Storage, Event, Config}, Historical: pallet_session_historical::{Pallet}, } @@ -93,12 +90,9 @@ frame_support::construct_runtime!( #[cfg(not(feature = "historical"))] frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Session: pallet_session::{Pallet, Call, Storage, Event, Config}, } ); @@ -215,7 +209,7 @@ pub fn reset_before_session_end_called() { } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let keys: Vec<_> = NextValidators::get() .iter() .cloned() @@ -244,14 +238,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); diff --git a/frame/session/src/weights.rs b/frame/session/src/weights.rs index add7f333590e1..dd9848fd2c177 100644 --- a/frame/session/src/weights.rs +++ b/frame/session/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_session //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! 
HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_session +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_session. pub trait WeightInfo { @@ -63,10 +67,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) fn set_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1891` - // Estimated: `12781` - // Minimum execution time: 48_507_000 picoseconds. - Weight::from_parts(49_214_000, 12781) + // Measured: `1924` + // Estimated: `12814` + // Minimum execution time: 55_459_000 picoseconds. + Weight::from_parts(56_180_000, 12814) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -78,10 +82,10 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) fn purge_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1758` - // Estimated: `5223` - // Minimum execution time: 35_388_000 picoseconds. - Weight::from_parts(35_763_000, 5223) + // Measured: `1791` + // Estimated: `5256` + // Minimum execution time: 40_194_000 picoseconds. 
+ Weight::from_parts(41_313_000, 5256) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -97,10 +101,10 @@ impl WeightInfo for () { /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) fn set_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1891` - // Estimated: `12781` - // Minimum execution time: 48_507_000 picoseconds. - Weight::from_parts(49_214_000, 12781) + // Measured: `1924` + // Estimated: `12814` + // Minimum execution time: 55_459_000 picoseconds. + Weight::from_parts(56_180_000, 12814) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -112,10 +116,10 @@ impl WeightInfo for () { /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) fn purge_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1758` - // Estimated: `5223` - // Minimum execution time: 35_388_000 picoseconds. - Weight::from_parts(35_763_000, 5223) + // Measured: `1791` + // Estimated: `5256` + // Minimum execution time: 40_194_000 picoseconds. 
+ Weight::from_parts(41_313_000, 5256) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml index ab74e39e4f83b..38f2ca66bd195 100644 --- a/frame/society/Cargo.toml +++ b/frame/society/Cargo.toml @@ -13,33 +13,58 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +log = { version = "0.4.17", default-features = false } rand_chacha = { version = "0.2", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } + +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] frame-support-test = { version = "3.0.0", path = "../support/test" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-core = { version = 
"21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } [features] default = ["std"] +# Enable `VersionedRuntimeUpgrade` for the migrations that is currently still experimental. +experimental = [ + "frame-support/experimental" +] std = [ "codec/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "rand_chacha/std", "scale-info/std", "sp-runtime/std", "sp-std/std", + "sp-io/std", + "frame-support-test/std", + "pallet-balances/std", + "sp-arithmetic/std", + "sp-core/std" ] runtime-benchmarks = [ - "frame-system/runtime-benchmarks", + "frame-benchmarking", "sp-runtime/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-support-test/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/society/src/benchmarking.rs b/frame/society/src/benchmarking.rs new file mode 100644 index 0000000000000..20af6e35ada52 --- /dev/null +++ b/frame/society/src/benchmarking.rs @@ -0,0 +1,379 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Society pallet benchmarking. 
+ +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use frame_benchmarking::{account, benchmarks_instance_pallet, whitelisted_caller}; +use frame_system::RawOrigin; + +use sp_runtime::traits::Bounded; + +use crate::Pallet as Society; + +fn mock_balance_deposit, I: 'static>() -> BalanceOf { + T::Currency::minimum_balance().saturating_mul(1_000u32.into()) +} + +fn make_deposit, I: 'static>(who: &T::AccountId) -> BalanceOf { + let amount = mock_balance_deposit::(); + let required = amount.saturating_add(T::Currency::minimum_balance()); + if T::Currency::free_balance(who) < required { + T::Currency::make_free_balance_be(who, required); + } + T::Currency::reserve(who, amount).expect("Pre-funded account; qed"); + amount +} + +fn make_bid, I: 'static>( + who: &T::AccountId, +) -> BidKind> { + BidKind::Deposit(make_deposit::(who)) +} + +fn fund_society, I: 'static>() { + T::Currency::make_free_balance_be( + &Society::::account_id(), + BalanceOf::::max_value(), + ); + Pot::::put(&BalanceOf::::max_value()); +} + +// Set up Society +fn setup_society, I: 'static>() -> Result { + let origin = T::FounderSetOrigin::try_successful_origin().map_err(|_| "No origin")?; + let founder: T::AccountId = account("founder", 0, 0); + let founder_lookup: ::Source = T::Lookup::unlookup(founder.clone()); + let max_members = 5u32; + let max_intake = 3u32; + let max_strikes = 3u32; + Society::::found_society( + origin, + founder_lookup, + max_members, + max_intake, + max_strikes, + mock_balance_deposit::(), + b"benchmarking-society".to_vec(), + )?; + T::Currency::make_free_balance_be( + &Society::::account_id(), + T::Currency::minimum_balance(), + ); + T::Currency::make_free_balance_be(&Society::::payouts(), T::Currency::minimum_balance()); + Ok(founder) +} + +fn setup_funded_society, I: 'static>() -> Result { + let founder = setup_society::()?; + fund_society::(); + Ok(founder) +} + +fn add_candidate, I: 'static>( + name: &'static str, + tally: Tally, + skeptic_struck: bool, +) -> 
T::AccountId { + let candidate: T::AccountId = account(name, 0, 0); + let candidacy = Candidacy { + round: RoundCount::::get(), + kind: make_bid::(&candidate), + bid: 0u32.into(), + tally, + skeptic_struck, + }; + Candidates::::insert(&candidate, &candidacy); + candidate +} + +fn increment_round, I: 'static>() { + let mut round_count = RoundCount::::get(); + round_count.saturating_inc(); + RoundCount::::put(round_count); +} + +benchmarks_instance_pallet! { + bid { + let founder = setup_society::()?; + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + }: _(RawOrigin::Signed(caller.clone()), 10u32.into()) + verify { + let first_bid: Bid> = Bid { + who: caller.clone(), + kind: BidKind::Deposit(mock_balance_deposit::()), + value: 10u32.into(), + }; + assert_eq!(Bids::::get(), vec![first_bid]); + } + + unbid { + let founder = setup_society::()?; + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let mut bids = Bids::::get(); + Society::::insert_bid(&mut bids, &caller, 10u32.into(), make_bid::(&caller)); + Bids::::put(bids); + }: _(RawOrigin::Signed(caller.clone())) + verify { + assert_eq!(Bids::::get(), vec![]); + } + + vouch { + let founder = setup_society::()?; + let caller: T::AccountId = whitelisted_caller(); + let vouched: T::AccountId = account("vouched", 0, 0); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let _ = Society::::insert_member(&caller, 1u32.into()); + let vouched_lookup: ::Source = T::Lookup::unlookup(vouched.clone()); + }: _(RawOrigin::Signed(caller.clone()), vouched_lookup, 0u32.into(), 0u32.into()) + verify { + let bids = Bids::::get(); + let vouched_bid: Bid> = Bid { + who: vouched.clone(), + kind: BidKind::Vouch(caller.clone(), 0u32.into()), + value: 0u32.into(), + }; + assert_eq!(bids, vec![vouched_bid]); + } + + unvouch { + let founder = setup_society::()?; + let caller: 
T::AccountId = whitelisted_caller(); + let vouched: T::AccountId = account("vouched", 0, 0); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let mut bids = Bids::::get(); + Society::::insert_bid(&mut bids, &caller, 10u32.into(), BidKind::Vouch(caller.clone(), 0u32.into())); + Bids::::put(bids); + }: _(RawOrigin::Signed(caller.clone())) + verify { + assert_eq!(Bids::::get(), vec![]); + } + + vote { + let founder = setup_society::()?; + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let _ = Society::::insert_member(&caller, 1u32.into()); + let candidate = add_candidate::("candidate", Default::default(), false); + let candidate_lookup: ::Source = T::Lookup::unlookup(candidate.clone()); + }: _(RawOrigin::Signed(caller.clone()), candidate_lookup, true) + verify { + let maybe_vote: Vote = >::get(candidate.clone(), caller).unwrap(); + assert_eq!(maybe_vote.approve, true); + } + + defender_vote { + let founder = setup_society::()?; + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let _ = Society::::insert_member(&caller, 1u32.into()); + let defender: T::AccountId = account("defender", 0, 0); + Defending::::put((defender, caller.clone(), Tally::default())); + }: _(RawOrigin::Signed(caller.clone()), false) + verify { + let round = RoundCount::::get(); + let skeptic_vote: Vote = DefenderVotes::::get(round, &caller).unwrap(); + assert_eq!(skeptic_vote.approve, false); + } + + payout { + let founder = setup_funded_society::()?; + // Payee's account already exists and is a member. + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, mock_balance_deposit::()); + let _ = Society::::insert_member(&caller, 0u32.into()); + // Introduce payout. 
+ Society::::bump_payout(&caller, 0u32.into(), 1u32.into()); + }: _(RawOrigin::Signed(caller.clone())) + verify { + let record = Payouts::::get(caller); + assert!(record.payouts.is_empty()); + } + + waive_repay { + let founder = setup_funded_society::()?; + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let _ = Society::::insert_member(&caller, 0u32.into()); + Society::::bump_payout(&caller, 0u32.into(), 1u32.into()); + }: _(RawOrigin::Signed(caller.clone()), 1u32.into()) + verify { + let record = Payouts::::get(caller); + assert!(record.payouts.is_empty()); + } + + found_society { + let founder: T::AccountId = whitelisted_caller(); + let can_found = T::FounderSetOrigin::try_successful_origin().map_err(|_| "No origin")?; + let founder_lookup: ::Source = T::Lookup::unlookup(founder.clone()); + }: _(can_found, founder_lookup, 5, 3, 3, mock_balance_deposit::(), b"benchmarking-society".to_vec()) + verify { + assert_eq!(Founder::::get(), Some(founder.clone())); + } + + dissolve { + let founder = setup_society::()?; + let members_and_candidates = vec![("m1", "c1"), ("m2", "c2"), ("m3", "c3"), ("m4", "c4")]; + let members_count = members_and_candidates.clone().len() as u32; + for (m, c) in members_and_candidates { + let member: T::AccountId = account(m, 0, 0); + let _ = Society::::insert_member(&member, 100u32.into()); + let candidate = add_candidate::(c, Tally { approvals: 1u32.into(), rejections: 1u32.into() }, false); + let candidate_lookup: ::Source = T::Lookup::unlookup(candidate); + let _ = Society::::vote(RawOrigin::Signed(member).into(), candidate_lookup, true); + } + // Leaving only Founder member. 
+ MemberCount::::mutate(|i| { i.saturating_reduce(members_count) }); + }: _(RawOrigin::Signed(founder)) + verify { + assert_eq!(Founder::::get(), None); + } + + judge_suspended_member { + let founder = setup_society::()?; + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + let _ = Society::::insert_member(&caller, 0u32.into()); + let _ = Society::::suspend_member(&caller); + }: _(RawOrigin::Signed(founder), caller_lookup, false) + verify { + assert_eq!(SuspendedMembers::::contains_key(&caller), false); + } + + set_parameters { + let founder = setup_society::()?; + let max_members = 10u32; + let max_intake = 10u32; + let max_strikes = 10u32; + let candidate_deposit: BalanceOf = 10u32.into(); + let params = GroupParams { max_members, max_intake, max_strikes, candidate_deposit }; + }: _(RawOrigin::Signed(founder), max_members, max_intake, max_strikes, candidate_deposit) + verify { + assert_eq!(Parameters::::get(), Some(params)); + } + + punish_skeptic { + let founder = setup_society::()?; + let candidate = add_candidate::("candidate", Default::default(), false); + let skeptic: T::AccountId = account("skeptic", 0, 0); + let _ = Society::::insert_member(&skeptic, 0u32.into()); + Skeptic::::put(&skeptic); + if let Period::Voting { more, .. 
} = Society::::period() { + frame_system::Pallet::::set_block_number(frame_system::Pallet::::block_number() + more); + } + }: _(RawOrigin::Signed(candidate.clone())) + verify { + let candidacy = Candidates::::get(&candidate).unwrap(); + assert_eq!(candidacy.skeptic_struck, true); + } + + claim_membership { + let founder = setup_society::()?; + let candidate = add_candidate::("candidate", Tally { approvals: 3u32.into(), rejections: 0u32.into() }, false); + increment_round::(); + }: _(RawOrigin::Signed(candidate.clone())) + verify { + assert!(!Candidates::::contains_key(&candidate)); + assert!(Members::::contains_key(&candidate)); + } + + bestow_membership { + let founder = setup_society::()?; + let candidate = add_candidate::("candidate", Tally { approvals: 3u32.into(), rejections: 1u32.into() }, false); + increment_round::(); + }: _(RawOrigin::Signed(founder), candidate.clone()) + verify { + assert!(!Candidates::::contains_key(&candidate)); + assert!(Members::::contains_key(&candidate)); + } + + kick_candidate { + let founder = setup_society::()?; + let candidate = add_candidate::("candidate", Tally { approvals: 1u32.into(), rejections: 1u32.into() }, false); + increment_round::(); + }: _(RawOrigin::Signed(founder), candidate.clone()) + verify { + assert!(!Candidates::::contains_key(&candidate)); + } + + resign_candidacy { + let founder = setup_society::()?; + let candidate = add_candidate::("candidate", Tally { approvals: 0u32.into(), rejections: 0u32.into() }, false); + }: _(RawOrigin::Signed(candidate.clone())) + verify { + assert!(!Candidates::::contains_key(&candidate)); + } + + drop_candidate { + let founder = setup_society::()?; + let candidate = add_candidate::("candidate", Tally { approvals: 0u32.into(), rejections: 3u32.into() }, false); + let caller: T::AccountId = whitelisted_caller(); + let _ = Society::::insert_member(&caller, 0u32.into()); + let mut round_count = RoundCount::::get(); + round_count = round_count.saturating_add(2u32); + 
RoundCount::::put(round_count); + }: _(RawOrigin::Signed(caller), candidate.clone()) + verify { + assert!(!Candidates::::contains_key(&candidate)); + } + + cleanup_candidacy { + let founder = setup_society::()?; + let candidate = add_candidate::("candidate", Tally { approvals: 0u32.into(), rejections: 0u32.into() }, false); + let member_one: T::AccountId = account("one", 0, 0); + let member_two: T::AccountId = account("two", 0, 0); + let _ = Society::::insert_member(&member_one, 0u32.into()); + let _ = Society::::insert_member(&member_two, 0u32.into()); + let candidate_lookup: ::Source = T::Lookup::unlookup(candidate.clone()); + let _ = Society::::vote(RawOrigin::Signed(member_one.clone()).into(), candidate_lookup.clone(), true); + let _ = Society::::vote(RawOrigin::Signed(member_two.clone()).into(), candidate_lookup, true); + Candidates::::remove(&candidate); + }: _(RawOrigin::Signed(member_one), candidate.clone(), 5) + verify { + assert_eq!(Votes::::get(&candidate, &member_two), None); + } + + cleanup_challenge { + let founder = setup_society::()?; + ChallengeRoundCount::::put(1u32); + let member: T::AccountId = whitelisted_caller(); + let _ = Society::::insert_member(&member, 0u32.into()); + let defender: T::AccountId = account("defender", 0, 0); + Defending::::put((defender.clone(), member.clone(), Tally::default())); + let _ = Society::::defender_vote(RawOrigin::Signed(member.clone()).into(), true); + ChallengeRoundCount::::put(2u32); + let mut challenge_round = ChallengeRoundCount::::get(); + challenge_round = challenge_round.saturating_sub(1u32); + }: _(RawOrigin::Signed(member.clone()), challenge_round, 1u32) + verify { + assert_eq!(DefenderVotes::::get(challenge_round, &defender), None); + } + + impl_benchmark_test_suite!( + Society, + sp_io::TestExternalities::from( + as sp_runtime::BuildStorage>::build_storage( + &frame_system::GenesisConfig::default()).unwrap() + ), + crate::mock::Test + ); +} diff --git a/frame/society/src/lib.rs 
b/frame/society/src/lib.rs index d92bee48d6aa5..ca8d96e193c84 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -30,7 +30,6 @@ //! At any point, a user in the society can be one of a: //! * Bidder - A user who has submitted intention of joining the society. //! * Candidate - A user who will be voted on to join the society. -//! * Suspended Candidate - A user who failed to win a vote. //! * Member - A user who is a member of the society. //! * Suspended Member - A member of the society who has accumulated too many strikes //! or failed their membership challenge. @@ -251,11 +250,21 @@ mod mock; #[cfg(test)] mod tests; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +pub mod weights; + +pub mod migrations; + use frame_support::{ + impl_ensure_origin_with_arg_ignoring_arg, pallet_prelude::*, + storage::KeyLenOf, traits::{ - BalanceStatus, ChangeMembers, Currency, EnsureOrigin, ExistenceRequirement::AllowDeath, - Imbalance, OnUnbalanced, Randomness, ReservableCurrency, + BalanceStatus, Currency, EnsureOrigin, EnsureOriginWithArg, + ExistenceRequirement::AllowDeath, Imbalance, OnUnbalanced, Randomness, ReservableCurrency, + StorageVersion, }, PalletId, }; @@ -267,13 +276,16 @@ use rand_chacha::{ use scale_info::TypeInfo; use sp_runtime::{ traits::{ - AccountIdConversion, CheckedSub, Hash, IntegerSquareRoot, Saturating, StaticLookup, + AccountIdConversion, CheckedAdd, CheckedSub, Hash, Saturating, StaticLookup, TrailingZeroInput, Zero, }, + ArithmeticError::Overflow, Percent, RuntimeDebug, }; use sp_std::prelude::*; +pub use weights::WeightInfo; + pub use pallet::*; type BalanceOf = @@ -283,15 +295,10 @@ type NegativeImbalanceOf = <>::Currency as Currency< >>::NegativeImbalance; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -/// A vote by a member on a candidate application. 
#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] -pub enum Vote { - /// The member has been chosen to be skeptic and has not yet taken any action. - Skeptic, - /// The member has rejected the candidate's application. - Reject, - /// The member approves of the candidate's application. - Approve, +pub struct Vote { + approve: bool, + weight: u32, } /// A judgement by the suspension judgement origin on a suspended candidate. @@ -342,10 +349,61 @@ pub struct Bid { value: Balance, } +/// The index of a round of candidates. +pub type RoundIndex = u32; + +/// The rank of a member. +pub type Rank = u32; + +/// The number of votes. +pub type VoteCount = u32; + +/// Tally of votes. +#[derive(Default, Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub struct Tally { + /// The approval votes. + approvals: VoteCount, + /// The rejection votes. + rejections: VoteCount, +} + +impl Tally { + fn more_approvals(&self) -> bool { + self.approvals > self.rejections + } + + fn more_rejections(&self) -> bool { + self.rejections > self.approvals + } + + fn clear_approval(&self) -> bool { + self.approvals >= (2 * self.rejections).max(1) + } + + fn clear_rejection(&self) -> bool { + self.rejections >= (2 * self.approvals).max(1) + } +} + +/// A bid for entry into society. +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub struct Candidacy { + /// The index of the round where the candidacy began. + round: RoundIndex, + /// The kind of bid placed for this bidder/candidate. See `BidKind`. + kind: BidKind, + /// The reward that the bidder has requested for successfully joining the society. + bid: Balance, + /// The tally of votes so far. + tally: Tally, + /// True if the skeptic was already punished for note voting. + skeptic_struck: bool, +} + /// A vote by a member on a candidate application. 
#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub enum BidKind { - /// The CandidateDeposit was paid for this bid. + /// The given deposit was paid for this bid. Deposit(Balance), /// A member vouched for this bid. The account should be reinstated into `Members` once the /// bid is successful (or if it is rescinded prior to launch). @@ -353,24 +411,64 @@ pub enum BidKind { } impl BidKind { - fn check_voucher(&self, v: &AccountId) -> DispatchResult { - if let BidKind::Vouch(ref a, _) = self { - if a == v { - Ok(()) - } else { - Err("incorrect identity".into()) - } - } else { - Err("not vouched".into()) - } + fn is_vouch(&self, v: &AccountId) -> bool { + matches!(self, BidKind::Vouch(ref a, _) if a == v) } } +pub type PayoutsFor = + BoundedVec<(BlockNumberFor, BalanceOf), >::MaxPayouts>; + +/// Information concerning a member. +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub struct MemberRecord { + rank: Rank, + strikes: StrikeCount, + vouching: Option, + index: u32, +} + +/// Information concerning a member. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, Default)] +pub struct PayoutRecord { + paid: Balance, + payouts: PayoutsVec, +} + +pub type PayoutRecordFor = PayoutRecord< + BalanceOf, + BoundedVec<(BlockNumberFor, BalanceOf), >::MaxPayouts>, +>; + +/// Record for an individual new member who was elevated from a candidate recently. 
+#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub struct IntakeRecord { + who: AccountId, + bid: Balance, + round: RoundIndex, +} + +pub type IntakeRecordFor = + IntakeRecord<::AccountId, BalanceOf>; + +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub struct GroupParams { + max_members: u32, + max_intake: u32, + max_strikes: u32, + candidate_deposit: Balance, +} + +pub type GroupParamsFor = GroupParams>; + +pub(crate) const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); + #[frame_support::pallet] pub mod pallet { use super::*; #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] #[pallet::without_storage_info] pub struct Pallet(_); @@ -388,56 +486,51 @@ pub mod pallet { type Currency: ReservableCurrency; /// Something that provides randomness in the runtime. - type Randomness: Randomness; - - /// The minimum amount of a deposit required for a bid to be made. - #[pallet::constant] - type CandidateDeposit: Get>; - - /// The amount of the unpaid reward that gets deducted in the case that either a skeptic - /// doesn't vote or someone votes in the wrong way. - #[pallet::constant] - type WrongSideDeduction: Get>; + type Randomness: Randomness>; - /// The number of times a member may vote the wrong way (or not at all, when they are a - /// skeptic) before they become suspended. + /// The maximum number of strikes before a member gets funds slashed. #[pallet::constant] - type MaxStrikes: Get; + type GraceStrikes: Get; /// The amount of incentive paid within each period. Doesn't include VoterTip. #[pallet::constant] type PeriodSpend: Get>; - /// The receiver of the signal for when the members have changed. - type MembershipChanged: ChangeMembers; + /// The number of blocks on which new candidates should be voted on. Together with + /// `ClaimPeriod`, this sums to the number of blocks between candidate intake periods. 
+ #[pallet::constant] + type VotingPeriod: Get>; - /// The number of blocks between candidate/membership rotation periods. + /// The number of blocks on which new candidates can claim their membership and be the + /// named head. #[pallet::constant] - type RotationPeriod: Get; + type ClaimPeriod: Get>; /// The maximum duration of the payout lock. #[pallet::constant] - type MaxLockDuration: Get; + type MaxLockDuration: Get>; /// The origin that is allowed to call `found`. type FounderSetOrigin: EnsureOrigin; - /// The origin that is allowed to make suspension judgements. - type SuspensionJudgementOrigin: EnsureOrigin; - /// The number of blocks between membership challenges. #[pallet::constant] - type ChallengePeriod: Get; + type ChallengePeriod: Get>; - /// The maximum number of candidates that we accept per round. + /// The maximum number of payouts a member may have waiting unclaimed. #[pallet::constant] - type MaxCandidateIntake: Get; + type MaxPayouts: Get; + + /// The maximum number of bids at once. + #[pallet::constant] + type MaxBids: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } #[pallet::error] pub enum Error { - /// An incorrect position was provided. - BadPosition, /// User is not a member. NotMember, /// User is already a member. @@ -455,7 +548,7 @@ pub mod pallet { /// Member is already vouching or banned from vouching again. AlreadyVouching, /// Member is not vouching. - NotVouching, + NotVouchingOnBidder, /// Cannot remove the head of the chain. Head, /// Cannot remove the founder. @@ -472,6 +565,36 @@ pub mod pallet { NotFounder, /// The caller is not the head. NotHead, + /// The membership cannot be claimed as the candidate was not clearly approved. + NotApproved, + /// The candidate cannot be kicked as the candidate was not clearly rejected. + NotRejected, + /// The candidacy cannot be dropped as the candidate was clearly approved. 
+ Approved, + /// The candidacy cannot be bestowed as the candidate was clearly rejected. + Rejected, + /// The candidacy cannot be concluded as the voting is still in progress. + InProgress, + /// The candidacy cannot be pruned until a full additional intake period has passed. + TooEarly, + /// The skeptic already voted. + Voted, + /// The skeptic need not vote on candidates from expired rounds. + Expired, + /// User is not a bidder. + NotBidder, + /// There is no defender currently. + NoDefender, + /// Group doesn't exist. + NotGroup, + /// The member is already elevated to this rank. + AlreadyElevated, + /// The skeptic has already been punished for this offence. + AlreadyPunished, + /// Funds are insufficient to pay off society debts. + InsufficientFunds, + /// The candidate/defender has no stale votes to remove. + NoVotes, } #[pallet::event] @@ -506,136 +629,157 @@ pub mod pallet { Vote { candidate: T::AccountId, voter: T::AccountId, vote: bool }, /// A vote has been placed for a defending member DefenderVote { voter: T::AccountId, vote: bool }, - /// A new \[max\] member count has been set - NewMaxMembers { max: u32 }, + /// A new set of \[params\] has been set for the group. + NewParams { params: GroupParamsFor }, /// Society is unfounded. Unfounded { founder: T::AccountId }, /// Some funds were deposited into the society account. Deposit { value: BalanceOf }, - /// A group of members has been choosen as Skeptics - SkepticsChosen { skeptics: Vec }, + /// A \[member\] got elevated to \[rank\]. + Elevated { member: T::AccountId, rank: Rank }, } + /// Old name generated by `decl_event`. + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + + /// The max number of members for the society at one time. + #[pallet::storage] + pub(super) type Parameters, I: 'static = ()> = + StorageValue<_, GroupParamsFor, OptionQuery>; + + /// Amount of our account balance that is specifically for the next round's bid(s). 
+ #[pallet::storage] + pub type Pot, I: 'static = ()> = StorageValue<_, BalanceOf, ValueQuery>; + /// The first member. #[pallet::storage] - #[pallet::getter(fn founder)] pub type Founder, I: 'static = ()> = StorageValue<_, T::AccountId>; + /// The most primary from the most recently approved rank 0 members in the society. + #[pallet::storage] + pub type Head, I: 'static = ()> = StorageValue<_, T::AccountId>; + /// A hash of the rules of this society concerning membership. Can only be set once and /// only by the founder. #[pallet::storage] - #[pallet::getter(fn rules)] pub type Rules, I: 'static = ()> = StorageValue<_, T::Hash>; - /// The current set of candidates; bidders that are attempting to become members. + /// The current members and their rank. Doesn't include `SuspendedMembers`. #[pallet::storage] - #[pallet::getter(fn candidates)] - pub type Candidates, I: 'static = ()> = - StorageValue<_, Vec>>, ValueQuery>; + pub type Members, I: 'static = ()> = + StorageMap<_, Twox64Concat, T::AccountId, MemberRecord, OptionQuery>; - /// The set of suspended candidates. + /// Information regarding rank-0 payouts, past and future. #[pallet::storage] - #[pallet::getter(fn suspended_candidate)] - pub type SuspendedCandidates, I: 'static = ()> = StorageMap< - _, - Twox64Concat, - T::AccountId, - (BalanceOf, BidKind>), - >; + pub type Payouts, I: 'static = ()> = + StorageMap<_, Twox64Concat, T::AccountId, PayoutRecordFor, ValueQuery>; - /// Amount of our account balance that is specifically for the next round's bid(s). + /// The number of items in `Members` currently. (Doesn't include `SuspendedMembers`.) #[pallet::storage] - #[pallet::getter(fn pot)] - pub type Pot, I: 'static = ()> = StorageValue<_, BalanceOf, ValueQuery>; + pub type MemberCount, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; - /// The most primary from the most recently approved members. + /// The current items in `Members` keyed by their unique index. 
Keys are densely populated + /// `0..MemberCount` (does not include `MemberCount`). #[pallet::storage] - #[pallet::getter(fn head)] - pub type Head, I: 'static = ()> = StorageValue<_, T::AccountId>; + pub type MemberByIndex, I: 'static = ()> = + StorageMap<_, Twox64Concat, u32, T::AccountId, OptionQuery>; - /// The current set of members, ordered. + /// The set of suspended members, with their old membership record. #[pallet::storage] - #[pallet::getter(fn members)] - pub type Members, I: 'static = ()> = - StorageValue<_, Vec, ValueQuery>; + pub type SuspendedMembers, I: 'static = ()> = + StorageMap<_, Twox64Concat, T::AccountId, MemberRecord, OptionQuery>; - /// The set of suspended members. + /// The number of rounds which have passed. #[pallet::storage] - #[pallet::getter(fn suspended_member)] - pub type SuspendedMembers, I: 'static = ()> = - StorageMap<_, Twox64Concat, T::AccountId, bool, ValueQuery>; + pub type RoundCount, I: 'static = ()> = StorageValue<_, RoundIndex, ValueQuery>; /// The current bids, stored ordered by the value of the bid. #[pallet::storage] pub(super) type Bids, I: 'static = ()> = - StorageValue<_, Vec>>, ValueQuery>; + StorageValue<_, BoundedVec>, T::MaxBids>, ValueQuery>; - /// Members currently vouching or banned from vouching again #[pallet::storage] - #[pallet::getter(fn vouching)] - pub(super) type Vouching, I: 'static = ()> = - StorageMap<_, Twox64Concat, T::AccountId, VouchingStatus>; + pub type Candidates, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + Candidacy>, + OptionQuery, + >; + + /// The current skeptic. + #[pallet::storage] + pub type Skeptic, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; - /// Pending payouts; ordered by block number, with the amount that should be paid out. + /// Double map from Candidate -> Voter -> (Maybe) Vote. 
#[pallet::storage] - pub(super) type Payouts, I: 'static = ()> = StorageMap< + pub(super) type Votes, I: 'static = ()> = StorageDoubleMap< _, Twox64Concat, T::AccountId, - Vec<(T::BlockNumber, BalanceOf)>, - ValueQuery, + Twox64Concat, + T::AccountId, + Vote, + OptionQuery, >; - /// The ongoing number of losing votes cast by the member. + /// Clear-cursor for Vote, map from Candidate -> (Maybe) Cursor. #[pallet::storage] - pub(super) type Strikes, I: 'static = ()> = - StorageMap<_, Twox64Concat, T::AccountId, StrikeCount, ValueQuery>; + pub(super) type VoteClearCursor, I: 'static = ()> = + StorageMap<_, Twox64Concat, T::AccountId, BoundedVec>>>; - /// Double map from Candidate -> Voter -> (Maybe) Vote. + /// At the end of the claim period, this contains the most recently approved members (along with + /// their bid and round ID) who is from the most recent round with the lowest bid. They will + /// become the new `Head`. #[pallet::storage] - pub(super) type Votes, I: 'static = ()> = - StorageDoubleMap<_, Twox64Concat, T::AccountId, Twox64Concat, T::AccountId, Vote>; + pub type NextHead, I: 'static = ()> = + StorageValue<_, IntakeRecordFor, OptionQuery>; - /// The defending member currently being challenged. + /// The number of challenge rounds there have been. Used to identify stale DefenderVotes. #[pallet::storage] - #[pallet::getter(fn defender)] - pub(super) type Defender, I: 'static = ()> = StorageValue<_, T::AccountId>; + pub(super) type ChallengeRoundCount, I: 'static = ()> = + StorageValue<_, RoundIndex, ValueQuery>; - /// Votes for the defender. + /// The defending member currently being challenged, along with a running tally of votes. #[pallet::storage] - pub(super) type DefenderVotes, I: 'static = ()> = - StorageMap<_, Twox64Concat, T::AccountId, Vote>; + pub(super) type Defending, I: 'static = ()> = + StorageValue<_, (T::AccountId, T::AccountId, Tally)>; - /// The max number of members for the society at one time. 
+ /// Votes for the defender, keyed by challenge round. #[pallet::storage] - #[pallet::getter(fn max_members)] - pub(super) type MaxMembers, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; + pub(super) type DefenderVotes, I: 'static = ()> = + StorageDoubleMap<_, Twox64Concat, RoundIndex, Twox64Concat, T::AccountId, Vote>; #[pallet::hooks] impl, I: 'static> Hooks> for Pallet { - fn on_initialize(n: T::BlockNumber) -> Weight { - let mut members = vec![]; - + fn on_initialize(n: BlockNumberFor) -> Weight { let mut weight = Weight::zero(); let weights = T::BlockWeights::get(); - // Run a candidate/membership rotation - if (n % T::RotationPeriod::get()).is_zero() { - members = >::get(); - Self::rotate_period(&mut members); + let phrase = b"society_rotation"; + // we'll need a random seed here. + // TODO: deal with randomness freshness + // https://github.com/paritytech/substrate/issues/8312 + let (seed, _) = T::Randomness::random(phrase); + // seed needs to be guaranteed to be 32 bytes. + let seed = <[u8; 32]>::decode(&mut TrailingZeroInput::new(seed.as_ref())) + .expect("input is padded with zeroes; qed"); + let mut rng = ChaChaRng::from_seed(seed); - weight += weights.max_block / 20; + // Run a candidate/membership rotation + match Self::period() { + Period::Voting { elapsed, .. } if elapsed.is_zero() => { + Self::rotate_intake(&mut rng); + weight.saturating_accrue(weights.max_block / 20); + }, + _ => {}, } // Run a challenge rotation if (n % T::ChallengePeriod::get()).is_zero() { - // Only read members if not already read. 
- if members.is_empty() { - members = >::get(); - } - Self::rotate_challenge(&mut members); - - weight += weights.max_block / 20; + Self::rotate_challenge(&mut rng); + weight.saturating_accrue(weights.max_block / 20); } weight @@ -646,23 +790,12 @@ pub mod pallet { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig, I: 'static = ()> { pub pot: BalanceOf, - pub members: Vec, - pub max_members: u32, } #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { + impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) { Pot::::put(self.pot); - MaxMembers::::put(self.max_members); - let first_member = self.members.first(); - if let Some(member) = first_member { - Founder::::put(member.clone()); - Head::::put(member.clone()); - }; - let mut m = self.members.clone(); - m.sort(); - Members::::put(m); } } @@ -670,37 +803,31 @@ pub mod pallet { impl, I: 'static> Pallet { /// A user outside of the society can make a bid for entry. /// - /// Payment: `CandidateDeposit` will be reserved for making a bid. It is returned + /// Payment: The group's Candidate Deposit will be reserved for making a bid. It is returned /// when the bid becomes a member, or if the bid calls `unbid`. /// /// The dispatch origin for this call must be _Signed_. /// /// Parameters: /// - `value`: A one time payment the bid would like to receive when joining the society. 
- /// - /// ## Complexity - /// - O(M + B + C + logM + logB + X) - /// - B (len of bids) - /// - C (len of candidates) - /// - M (len of members) - /// - X (balance reserve) #[pallet::call_index(0)] - #[pallet::weight(T::BlockWeights::get().max_block / 10)] + #[pallet::weight(T::WeightInfo::bid())] pub fn bid(origin: OriginFor, value: BalanceOf) -> DispatchResult { let who = ensure_signed(origin)?; - ensure!(!>::contains_key(&who), Error::::Suspended); - ensure!(!>::contains_key(&who), Error::::Suspended); - let bids = >::get(); - ensure!(!Self::is_bid(&bids, &who), Error::::AlreadyBid); - let candidates = >::get(); - ensure!(!Self::is_candidate(&candidates, &who), Error::::AlreadyCandidate); - let members = >::get(); - ensure!(!Self::is_member(&members, &who), Error::::AlreadyMember); - - let deposit = T::CandidateDeposit::get(); + + let mut bids = Bids::::get(); + ensure!(!Self::has_bid(&bids, &who), Error::::AlreadyBid); + ensure!(!Candidates::::contains_key(&who), Error::::AlreadyCandidate); + ensure!(!Members::::contains_key(&who), Error::::AlreadyMember); + ensure!(!SuspendedMembers::::contains_key(&who), Error::::Suspended); + + let params = Parameters::::get().ok_or(Error::::NotGroup)?; + let deposit = params.candidate_deposit; + // NOTE: Reserve must happen before `insert_bid` since that could end up unreserving. T::Currency::reserve(&who, deposit)?; + Self::insert_bid(&mut bids, &who, value, BidKind::Deposit(deposit)); - Self::put_bid(bids, &who, value, BidKind::Deposit(deposit)); + Bids::::put(bids); Self::deposit_event(Event::::Bid { candidate_id: who, offer: value }); Ok(()) } @@ -712,40 +839,17 @@ pub mod pallet { /// Payment: The bid deposit is unreserved if the user made a bid. /// /// The dispatch origin for this call must be _Signed_ and a bidder. - /// - /// Parameters: - /// - `pos`: Position in the `Bids` vector of the bid who wants to unbid. 
- /// - /// ## Complexity - /// - O(B + X) - /// - B (len of bids) - /// - X (balance unreserve) #[pallet::call_index(1)] - #[pallet::weight(T::BlockWeights::get().max_block / 10)] - pub fn unbid(origin: OriginFor, pos: u32) -> DispatchResult { + #[pallet::weight(T::WeightInfo::unbid())] + pub fn unbid(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; - let pos = pos as usize; - >::mutate(|b| { - if pos < b.len() && b[pos].who == who { - // Either unreserve the deposit or free up the vouching member. - // In neither case can we do much if the action isn't completable, but there's - // no reason that either should fail. - match b.remove(pos).kind { - BidKind::Deposit(deposit) => { - let err_amount = T::Currency::unreserve(&who, deposit); - debug_assert!(err_amount.is_zero()); - }, - BidKind::Vouch(voucher, _) => { - >::remove(&voucher); - }, - } - Self::deposit_event(Event::::Unbid { candidate: who }); - Ok(()) - } else { - Err(Error::::BadPosition.into()) - } - }) + let mut bids = Bids::::get(); + let pos = bids.iter().position(|bid| bid.who == who).ok_or(Error::::NotBidder)?; + Self::clean_bid(&bids.remove(pos)); + Bids::::put(bids); + Self::deposit_event(Event::::Unbid { candidate: who }); + Ok(()) } /// As a member, vouch for someone to join society by placing a bid on their behalf. @@ -765,15 +869,8 @@ pub mod pallet { /// a member in the society. /// - `tip`: Your cut of the total `value` payout when the candidate is inducted into /// the society. Tips larger than `value` will be saturated upon payout. 
- /// - /// ## Complexity - /// - O(M + B + C + logM + logB + X) - /// - B (len of bids) - /// - C (len of candidates) - /// - M (len of members) - /// - X (balance reserve) #[pallet::call_index(2)] - #[pallet::weight(T::BlockWeights::get().max_block / 10)] + #[pallet::weight(T::WeightInfo::vouch())] pub fn vouch( origin: OriginFor, who: AccountIdLookupOf, @@ -782,23 +879,28 @@ pub mod pallet { ) -> DispatchResult { let voucher = ensure_signed(origin)?; let who = T::Lookup::lookup(who)?; - // Check user is not suspended. - ensure!(!>::contains_key(&who), Error::::Suspended); - ensure!(!>::contains_key(&who), Error::::Suspended); - // Check user is not a bid or candidate. - let bids = >::get(); - ensure!(!Self::is_bid(&bids, &who), Error::::AlreadyBid); - let candidates = >::get(); - ensure!(!Self::is_candidate(&candidates, &who), Error::::AlreadyCandidate); - // Check user is not already a member. - let members = >::get(); - ensure!(!Self::is_member(&members, &who), Error::::AlreadyMember); + + // Get bids and check user is not bidding. + let mut bids = Bids::::get(); + ensure!(!Self::has_bid(&bids, &who), Error::::AlreadyBid); + + // Check user is not already a candidate, member or suspended member. + ensure!(!Candidates::::contains_key(&who), Error::::AlreadyCandidate); + ensure!(!Members::::contains_key(&who), Error::::AlreadyMember); + ensure!(!SuspendedMembers::::contains_key(&who), Error::::Suspended); + // Check sender can vouch. - ensure!(Self::is_member(&members, &voucher), Error::::NotMember); - ensure!(!>::contains_key(&voucher), Error::::AlreadyVouching); + let mut record = Members::::get(&voucher).ok_or(Error::::NotMember)?; + ensure!(record.vouching.is_none(), Error::::AlreadyVouching); + + // Update voucher record. 
+ record.vouching = Some(VouchingStatus::Vouching); + // Update bids + Self::insert_bid(&mut bids, &who, value, BidKind::Vouch(voucher.clone(), tip)); - >::insert(&voucher, VouchingStatus::Vouching); - Self::put_bid(bids, &who, value, BidKind::Vouch(voucher.clone(), tip)); + // Write new state. + Members::::insert(&voucher, &record); + Bids::::put(bids); Self::deposit_event(Event::::Vouch { candidate_id: who, offer: value, @@ -814,31 +916,22 @@ pub mod pallet { /// /// Parameters: /// - `pos`: Position in the `Bids` vector of the bid who should be unvouched. - /// - /// ## Complexity - /// - O(B) - /// - B (len of bids) #[pallet::call_index(3)] - #[pallet::weight(T::BlockWeights::get().max_block / 10)] - pub fn unvouch(origin: OriginFor, pos: u32) -> DispatchResult { + #[pallet::weight(T::WeightInfo::unvouch())] + pub fn unvouch(origin: OriginFor) -> DispatchResult { let voucher = ensure_signed(origin)?; - ensure!( - Self::vouching(&voucher) == Some(VouchingStatus::Vouching), - Error::::NotVouching - ); - let pos = pos as usize; - >::mutate(|b| { - if pos < b.len() { - b[pos].kind.check_voucher(&voucher)?; - >::remove(&voucher); - let who = b.remove(pos).who; - Self::deposit_event(Event::::Unvouch { candidate: who }); - Ok(()) - } else { - Err(Error::::BadPosition.into()) - } - }) + let mut bids = Bids::::get(); + let pos = bids + .iter() + .position(|bid| bid.kind.is_vouch(&voucher)) + .ok_or(Error::::NotVouchingOnBidder)?; + let bid = bids.remove(pos); + Self::clean_bid(&bid); + + Bids::::put(bids); + Self::deposit_event(Event::::Unvouch { candidate: bid.who }); + Ok(()) } /// As a member, vote on a candidate. @@ -849,30 +942,29 @@ pub mod pallet { /// - `candidate`: The candidate that the member would like to bid on. /// - `approve`: A boolean which says if the candidate should be approved (`true`) or /// rejected (`false`). 
- /// - /// ## Complexity - /// - O(M + logM + C) - /// - C (len of candidates) - /// - M (len of members) #[pallet::call_index(4)] - #[pallet::weight(T::BlockWeights::get().max_block / 10)] + #[pallet::weight(T::WeightInfo::vote())] pub fn vote( origin: OriginFor, candidate: AccountIdLookupOf, approve: bool, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { let voter = ensure_signed(origin)?; let candidate = T::Lookup::lookup(candidate)?; - let candidates = >::get(); - ensure!(Self::is_candidate(&candidates, &candidate), Error::::NotCandidate); - let members = >::get(); - ensure!(Self::is_member(&members, &voter), Error::::NotMember); - let vote = if approve { Vote::Approve } else { Vote::Reject }; - >::insert(&candidate, &voter, vote); + let mut candidacy = + Candidates::::get(&candidate).ok_or(Error::::NotCandidate)?; + let record = Members::::get(&voter).ok_or(Error::::NotMember)?; + + let first_time = Votes::::mutate(&candidate, &voter, |v| { + let first_time = v.is_none(); + *v = Some(Self::do_vote(*v, approve, record.rank, &mut candidacy.tally)); + first_time + }); + Candidates::::insert(&candidate, &candidacy); Self::deposit_event(Event::::Vote { candidate, voter, vote: approve }); - Ok(()) + Ok(if first_time { Pays::No } else { Pays::Yes }.into()) } /// As a member, vote on the defender. @@ -882,22 +974,24 @@ pub mod pallet { /// Parameters: /// - `approve`: A boolean which says if the candidate should be /// approved (`true`) or rejected (`false`). 
- /// - /// ## Complexity - /// - O(M + logM) - /// - M (len of members) #[pallet::call_index(5)] - #[pallet::weight(T::BlockWeights::get().max_block / 10)] - pub fn defender_vote(origin: OriginFor, approve: bool) -> DispatchResult { + #[pallet::weight(T::WeightInfo::defender_vote())] + pub fn defender_vote(origin: OriginFor, approve: bool) -> DispatchResultWithPostInfo { let voter = ensure_signed(origin)?; - let members = >::get(); - ensure!(Self::is_member(&members, &voter), Error::::NotMember); - let vote = if approve { Vote::Approve } else { Vote::Reject }; - >::insert(&voter, vote); + let mut defending = Defending::::get().ok_or(Error::::NoDefender)?; + let record = Members::::get(&voter).ok_or(Error::::NotMember)?; + let round = ChallengeRoundCount::::get(); + let first_time = DefenderVotes::::mutate(round, &voter, |v| { + let first_time = v.is_none(); + *v = Some(Self::do_vote(*v, approve, record.rank, &mut defending.2)); + first_time + }); + + Defending::::put(defending); Self::deposit_event(Event::::DefenderVote { voter, vote: approve }); - Ok(()) + Ok(if first_time { Pays::No } else { Pays::Yes }.into()) } /// Transfer the first matured payout for the sender and remove it from the records. @@ -910,34 +1004,48 @@ pub mod pallet { /// /// The dispatch origin for this call must be _Signed_ and a member with /// payouts remaining. 
- /// - /// ## Complexity - /// - O(M + logM + P + X) - /// - M (len of members) - /// - P (number of payouts for a particular member) - /// - X (currency transfer call) #[pallet::call_index(6)] - #[pallet::weight(T::BlockWeights::get().max_block / 10)] + #[pallet::weight(T::WeightInfo::payout())] pub fn payout(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; + ensure!( + Members::::get(&who).ok_or(Error::::NotMember)?.rank == 0, + Error::::NoPayout + ); + let mut record = Payouts::::get(&who); - let members = >::get(); - ensure!(Self::is_member(&members, &who), Error::::NotMember); - - let mut payouts = >::get(&who); - if let Some((when, amount)) = payouts.first() { + if let Some((when, amount)) = record.payouts.first() { if when <= &>::block_number() { + record.paid = record.paid.checked_add(amount).ok_or(Overflow)?; T::Currency::transfer(&Self::payouts(), &who, *amount, AllowDeath)?; - payouts.remove(0); - if payouts.is_empty() { - >::remove(&who); - } else { - >::insert(&who, payouts); - } + record.payouts.remove(0); + Payouts::::insert(&who, record); return Ok(()) } } - Err(Error::::NoPayout.into()) + Err(Error::::NoPayout)? + } + + /// Repay the payment previously given to the member with the signed origin, remove any + /// pending payments, and elevate them from rank 0 to rank 1. 
+ #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::waive_repay())] + pub fn waive_repay(origin: OriginFor, amount: BalanceOf) -> DispatchResult { + let who = ensure_signed(origin)?; + let mut record = Members::::get(&who).ok_or(Error::::NotMember)?; + let mut payout_record = Payouts::::get(&who); + ensure!(record.rank == 0, Error::::AlreadyElevated); + ensure!(amount >= payout_record.paid, Error::::InsufficientFunds); + + T::Currency::transfer(&who, &Self::account_id(), payout_record.paid, AllowDeath)?; + payout_record.paid = Zero::zero(); + payout_record.payouts.clear(); + record.rank = 1; + Members::::insert(&who, record); + Payouts::::insert(&who, payout_record); + Self::deposit_event(Event::::Elevated { member: who, rank: 1 }); + + Ok(()) } /// Found the society. @@ -950,54 +1058,71 @@ pub mod pallet { /// Parameters: /// - `founder` - The first member and head of the newly founded society. /// - `max_members` - The initial max number of members for the society. + /// - `max_intake` - The maximum number of candidates per intake period. + /// - `max_strikes`: The maximum number of strikes a member may get before they become + /// suspended and may only be reinstated by the founder. + /// - `candidate_deposit`: The deposit required to make a bid for membership of the group. /// - `rules` - The rules of this society concerning membership. 
/// - /// ## Complexity - /// - O(1) - #[pallet::call_index(7)] - #[pallet::weight(T::BlockWeights::get().max_block / 10)] - pub fn found( + /// Complexity: O(1) + #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::found_society())] + pub fn found_society( origin: OriginFor, founder: AccountIdLookupOf, max_members: u32, + max_intake: u32, + max_strikes: u32, + candidate_deposit: BalanceOf, rules: Vec, ) -> DispatchResult { T::FounderSetOrigin::ensure_origin(origin)?; let founder = T::Lookup::lookup(founder)?; - ensure!(!>::exists(), Error::::AlreadyFounded); + ensure!(!Head::::exists(), Error::::AlreadyFounded); ensure!(max_members > 1, Error::::MaxMembers); // This should never fail in the context of this function... - >::put(max_members); - Self::add_member(&founder)?; - >::put(&founder); - >::put(&founder); + let params = GroupParams { max_members, max_intake, max_strikes, candidate_deposit }; + Parameters::::put(params); + Self::insert_member(&founder, 1)?; + Head::::put(&founder); + Founder::::put(&founder); Rules::::put(T::Hashing::hash(&rules)); Self::deposit_event(Event::::Founded { founder }); Ok(()) } - /// Annul the founding of the society. + /// Dissolve the society and remove all members. /// /// The dispatch origin for this call must be Signed, and the signing account must be both /// the `Founder` and the `Head`. This implies that it may only be done when there is one /// member. 
- /// - /// ## Complexity - /// - O(1) - #[pallet::call_index(8)] - #[pallet::weight(T::BlockWeights::get().max_block / 10)] - pub fn unfound(origin: OriginFor) -> DispatchResult { + #[pallet::call_index(9)] + #[pallet::weight(T::WeightInfo::dissolve())] + pub fn dissolve(origin: OriginFor) -> DispatchResult { let founder = ensure_signed(origin)?; - ensure!(Founder::::get() == Some(founder.clone()), Error::::NotFounder); - ensure!(Head::::get() == Some(founder.clone()), Error::::NotHead); - - Members::::kill(); + ensure!(Founder::::get().as_ref() == Some(&founder), Error::::NotFounder); + ensure!(MemberCount::::get() == 1, Error::::NotHead); + + let _ = Members::::clear(u32::MAX, None); + MemberCount::::kill(); + let _ = MemberByIndex::::clear(u32::MAX, None); + let _ = SuspendedMembers::::clear(u32::MAX, None); + let _ = Payouts::::clear(u32::MAX, None); + let _ = Votes::::clear(u32::MAX, None); + let _ = VoteClearCursor::::clear(u32::MAX, None); Head::::kill(); + NextHead::::kill(); Founder::::kill(); Rules::::kill(); - Candidates::::kill(); - #[allow(deprecated)] - SuspendedCandidates::::remove_all(None); + Parameters::::kill(); + Pot::::kill(); + RoundCount::::kill(); + Bids::::kill(); + Skeptic::::kill(); + ChallengeRoundCount::::kill(); + Defending::::kill(); + let _ = DefenderVotes::::clear(u32::MAX, None); + let _ = Candidates::::clear(u32::MAX, None); Self::deposit_event(Event::::Unfounded { founder }); Ok(()) } @@ -1010,162 +1135,235 @@ pub mod pallet { /// If a suspended member is rejected, remove all associated storage items, including /// their payouts, and remove any vouched bids they currently have. /// - /// The dispatch origin for this call must be from the _SuspensionJudgementOrigin_. + /// The dispatch origin for this call must be Signed from the Founder. /// /// Parameters: /// - `who` - The suspended member to be judged. 
/// - `forgive` - A boolean representing whether the suspension judgement origin forgives /// (`true`) or rejects (`false`) a suspended member. - /// - /// ## Complexity - /// - O(M + logM + B) - /// - B (len of bids) - /// - M (len of members) - #[pallet::call_index(9)] - #[pallet::weight(T::BlockWeights::get().max_block / 10)] + #[pallet::call_index(10)] + #[pallet::weight(T::WeightInfo::judge_suspended_member())] pub fn judge_suspended_member( origin: OriginFor, who: AccountIdLookupOf, forgive: bool, - ) -> DispatchResult { - T::SuspensionJudgementOrigin::ensure_origin(origin)?; + ) -> DispatchResultWithPostInfo { + ensure!( + Some(ensure_signed(origin)?) == Founder::::get(), + Error::::NotFounder + ); let who = T::Lookup::lookup(who)?; - ensure!(>::contains_key(&who), Error::::NotSuspended); - + let record = SuspendedMembers::::get(&who).ok_or(Error::::NotSuspended)?; if forgive { // Try to add member back to society. Can fail with `MaxMembers` limit. - Self::add_member(&who)?; + Self::reinstate_member(&who, record.rank)?; } else { - // Cancel a suspended member's membership, remove their payouts. - >::remove(&who); - >::remove(&who); - // Remove their vouching status, potentially unbanning them in the future. - if >::take(&who) == Some(VouchingStatus::Vouching) { - // Try to remove their bid if they are vouching. - // If their vouch is already a candidate, do nothing. 
- >::mutate(|bids| - // Try to find the matching bid - if let Some(pos) = bids.iter().position(|b| b.kind.check_voucher(&who).is_ok()) { - // Remove the bid, and emit an event - let vouched = bids.remove(pos).who; - Self::deposit_event(Event::::Unvouch { candidate: vouched }); - } - ); - } + let payout_record = Payouts::::take(&who); + let total = payout_record + .payouts + .into_iter() + .map(|x| x.1) + .fold(Zero::zero(), |acc: BalanceOf, x| acc.saturating_add(x)); + Self::unreserve_payout(total); } - - >::remove(&who); + SuspendedMembers::::remove(&who); Self::deposit_event(Event::::SuspendedMemberJudgement { who, judged: forgive }); - Ok(()) + Ok(Pays::No.into()) } - /// Allow suspended judgement origin to make judgement on a suspended candidate. - /// - /// If the judgement is `Approve`, we add them to society as a member with the appropriate - /// payment for joining society. - /// - /// If the judgement is `Reject`, we either slash the deposit of the bid, giving it back - /// to the society treasury, or we ban the voucher from vouching again. - /// - /// If the judgement is `Rebid`, we put the candidate back in the bid pool and let them go - /// through the induction process again. + /// Change the maximum number of members in society and the maximum number of new candidates + /// in a single intake period. /// - /// The dispatch origin for this call must be from the _SuspensionJudgementOrigin_. + /// The dispatch origin for this call must be Signed by the Founder. /// /// Parameters: - /// - `who` - The suspended candidate to be judged. - /// - `judgement` - `Approve`, `Reject`, or `Rebid`. - /// - /// ## Complexity - /// - O(M + logM + B + X) - /// - B (len of bids) - /// - M (len of members) - /// - X (balance action) - #[pallet::call_index(10)] - #[pallet::weight(T::BlockWeights::get().max_block / 10)] - pub fn judge_suspended_candidate( + /// - `max_members` - The maximum number of members for the society. 
This must be no less + /// than the current number of members. + /// - `max_intake` - The maximum number of candidates per intake period. + /// - `max_strikes`: The maximum number of strikes a member may get before they become + /// suspended and may only be reinstated by the founder. + /// - `candidate_deposit`: The deposit required to make a bid for membership of the group. + #[pallet::call_index(11)] + #[pallet::weight(T::WeightInfo::set_parameters())] + pub fn set_parameters( origin: OriginFor, - who: AccountIdLookupOf, - judgement: Judgement, + max_members: u32, + max_intake: u32, + max_strikes: u32, + candidate_deposit: BalanceOf, ) -> DispatchResult { - T::SuspensionJudgementOrigin::ensure_origin(origin)?; - let who = T::Lookup::lookup(who)?; - if let Some((value, kind)) = >::get(&who) { - match judgement { - Judgement::Approve => { - // Suspension Judgement origin has approved this candidate - // Make sure we can pay them - let pot = Self::pot(); - ensure!(pot >= value, Error::::InsufficientPot); - // Try to add user as a member! Can fail with `MaxMember` limit. - Self::add_member(&who)?; - // Reduce next pot by payout - >::put(pot - value); - // Add payout for new candidate - let maturity = >::block_number() + - Self::lock_duration(Self::members().len() as u32); - Self::pay_accepted_candidate(&who, value, kind, maturity); - }, - Judgement::Reject => { - // Founder has rejected this candidate - match kind { - BidKind::Deposit(deposit) => { - // Slash deposit and move it to the society account - let res = T::Currency::repatriate_reserved( - &who, - &Self::account_id(), - deposit, - BalanceStatus::Free, - ); - debug_assert!(res.is_ok()); - }, - BidKind::Vouch(voucher, _) => { - // Ban the voucher from vouching again - >::insert(&voucher, VouchingStatus::Banned); - }, - } - }, - Judgement::Rebid => { - // Founder has taken no judgement, and candidate is placed back into the - // pool. 
- let bids = >::get(); - Self::put_bid(bids, &who, value, kind); - }, - } + ensure!( + Some(ensure_signed(origin)?) == Founder::::get(), + Error::::NotFounder + ); + ensure!(max_members >= MemberCount::::get(), Error::::MaxMembers); + let params = GroupParams { max_members, max_intake, max_strikes, candidate_deposit }; + Parameters::::put(¶ms); + Self::deposit_event(Event::::NewParams { params }); + Ok(()) + } - // Remove suspended candidate - >::remove(who); - } else { - return Err(Error::::NotSuspended.into()) + /// Punish the skeptic with a strike if they did not vote on a candidate. Callable by the + /// candidate. + #[pallet::call_index(12)] + #[pallet::weight(T::WeightInfo::punish_skeptic())] + pub fn punish_skeptic(origin: OriginFor) -> DispatchResultWithPostInfo { + let candidate = ensure_signed(origin)?; + let mut candidacy = + Candidates::::get(&candidate).ok_or(Error::::NotCandidate)?; + ensure!(!candidacy.skeptic_struck, Error::::AlreadyPunished); + ensure!(!Self::in_progress(candidacy.round), Error::::InProgress); + let punished = Self::check_skeptic(&candidate, &mut candidacy); + Candidates::::insert(&candidate, candidacy); + Ok(if punished { Pays::No } else { Pays::Yes }.into()) + } + + /// Transform an approved candidate into a member. Callable only by the + /// the candidate, and only after the period for voting has ended. + #[pallet::call_index(13)] + #[pallet::weight(T::WeightInfo::claim_membership())] + pub fn claim_membership(origin: OriginFor) -> DispatchResultWithPostInfo { + let candidate = ensure_signed(origin)?; + let candidacy = + Candidates::::get(&candidate).ok_or(Error::::NotCandidate)?; + ensure!(candidacy.tally.clear_approval(), Error::::NotApproved); + ensure!(!Self::in_progress(candidacy.round), Error::::InProgress); + Self::induct_member(candidate, candidacy, 0)?; + Ok(Pays::No.into()) + } + + /// Transform an approved candidate into a member. 
Callable only by the Signed origin of the + /// Founder, only after the period for voting has ended and only when the candidate is not + /// clearly rejected. + #[pallet::call_index(14)] + #[pallet::weight(T::WeightInfo::bestow_membership())] + pub fn bestow_membership( + origin: OriginFor, + candidate: T::AccountId, + ) -> DispatchResultWithPostInfo { + ensure!( + Some(ensure_signed(origin)?) == Founder::::get(), + Error::::NotFounder + ); + let candidacy = + Candidates::::get(&candidate).ok_or(Error::::NotCandidate)?; + ensure!(!candidacy.tally.clear_rejection(), Error::::Rejected); + ensure!(!Self::in_progress(candidacy.round), Error::::InProgress); + Self::induct_member(candidate, candidacy, 0)?; + Ok(Pays::No.into()) + } + + /// Remove the candidate's application from the society. Callable only by the Signed origin + /// of the Founder, only after the period for voting has ended, and only when they do not + /// have a clear approval. + /// + /// Any bid deposit is lost and voucher is banned. + #[pallet::call_index(15)] + #[pallet::weight(T::WeightInfo::kick_candidate())] + pub fn kick_candidate( + origin: OriginFor, + candidate: T::AccountId, + ) -> DispatchResultWithPostInfo { + ensure!( + Some(ensure_signed(origin)?) == Founder::::get(), + Error::::NotFounder + ); + let mut candidacy = + Candidates::::get(&candidate).ok_or(Error::::NotCandidate)?; + ensure!(!Self::in_progress(candidacy.round), Error::::InProgress); + ensure!(!candidacy.tally.clear_approval(), Error::::Approved); + Self::check_skeptic(&candidate, &mut candidacy); + Self::reject_candidate(&candidate, &candidacy.kind); + Candidates::::remove(&candidate); + Ok(Pays::No.into()) + } + + /// Remove the candidate's application from the society. Callable only by the candidate. + /// + /// Any bid deposit is lost and voucher is banned. 
+ #[pallet::call_index(16)] + #[pallet::weight(T::WeightInfo::resign_candidacy())] + pub fn resign_candidacy(origin: OriginFor) -> DispatchResultWithPostInfo { + let candidate = ensure_signed(origin)?; + let mut candidacy = + Candidates::::get(&candidate).ok_or(Error::::NotCandidate)?; + if !Self::in_progress(candidacy.round) { + Self::check_skeptic(&candidate, &mut candidacy); } - Ok(()) + Self::reject_candidate(&candidate, &candidacy.kind); + Candidates::::remove(&candidate); + Ok(Pays::No.into()) } - /// Allows root origin to change the maximum number of members in society. - /// Max membership count must be greater than 1. + /// Remove a `candidate`'s failed application from the society. Callable by any + /// signed origin but only at the end of the subsequent round and only for + /// a candidate with more rejections than approvals. /// - /// The dispatch origin for this call must be from _ROOT_. + /// The bid deposit is lost and the voucher is banned. + #[pallet::call_index(17)] + #[pallet::weight(T::WeightInfo::drop_candidate())] + pub fn drop_candidate( + origin: OriginFor, + candidate: T::AccountId, + ) -> DispatchResultWithPostInfo { + ensure_signed(origin)?; + let candidacy = + Candidates::::get(&candidate).ok_or(Error::::NotCandidate)?; + ensure!(candidacy.tally.clear_rejection(), Error::::NotRejected); + ensure!(RoundCount::::get() > candidacy.round + 1, Error::::TooEarly); + Self::reject_candidate(&candidate, &candidacy.kind); + Candidates::::remove(&candidate); + Ok(Pays::No.into()) + } + + /// Remove up to `max` stale votes for the given `candidate`. /// - /// Parameters: - /// - `max` - The maximum number of members for the society. + /// May be called by any Signed origin, but only after the candidate's candidacy is ended. 
+ #[pallet::call_index(18)] + #[pallet::weight(T::WeightInfo::cleanup_candidacy())] + pub fn cleanup_candidacy( + origin: OriginFor, + candidate: T::AccountId, + max: u32, + ) -> DispatchResultWithPostInfo { + ensure_signed(origin)?; + ensure!(!Candidates::::contains_key(&candidate), Error::::InProgress); + let maybe_cursor = VoteClearCursor::::get(&candidate); + let r = + Votes::::clear_prefix(&candidate, max, maybe_cursor.as_ref().map(|x| &x[..])); + if let Some(cursor) = r.maybe_cursor { + VoteClearCursor::::insert(&candidate, BoundedVec::truncate_from(cursor)); + } + Ok(if r.loops == 0 { Pays::Yes } else { Pays::No }.into()) + } + + /// Remove up to `max` stale votes for the defender in the given `challenge_round`. /// - /// ## Complexity - /// - O(1) - #[pallet::call_index(11)] - #[pallet::weight(T::BlockWeights::get().max_block / 10)] - pub fn set_max_members(origin: OriginFor, max: u32) -> DispatchResult { - ensure_root(origin)?; - ensure!(max > 1, Error::::MaxMembers); - MaxMembers::::put(max); - Self::deposit_event(Event::::NewMaxMembers { max }); - Ok(()) + /// May be called by any Signed origin, but only after the challenge round is ended. + #[pallet::call_index(19)] + #[pallet::weight(T::WeightInfo::cleanup_challenge())] + pub fn cleanup_challenge( + origin: OriginFor, + challenge_round: RoundIndex, + max: u32, + ) -> DispatchResultWithPostInfo { + ensure_signed(origin)?; + ensure!( + challenge_round < ChallengeRoundCount::::get(), + Error::::InProgress + ); + let _ = DefenderVotes::::clear_prefix(challenge_round, max, None); + // clear_prefix() v2 is always returning backend = 0, ignoring it till v3. + // let (_, backend, _, _) = r.deconstruct(); + // if backend == 0 { return Err(Error::::NoVotes.into()); }; + Ok(Pays::No.into()) } } } /// Simple ensure origin struct to filter for the founder account. 
pub struct EnsureFounder(sp_std::marker::PhantomData); -impl EnsureOrigin for EnsureFounder { +impl EnsureOrigin<::RuntimeOrigin> for EnsureFounder { type Success = T::AccountId; fn try_origin(o: T::RuntimeOrigin) -> Result { o.into().and_then(|o| match (o, Founder::::get()) { @@ -1181,380 +1379,525 @@ impl EnsureOrigin for EnsureFounder { } } -/// Pick an item at pseudo-random from the slice, given the `rng`. `None` iff the slice is empty. -fn pick_item<'a, R: RngCore, T>(rng: &mut R, items: &'a [T]) -> Option<&'a T> { - if items.is_empty() { - None - } else { - Some(&items[pick_usize(rng, items.len() - 1)]) +impl_ensure_origin_with_arg_ignoring_arg! { + impl<{ T: Config, A }> + EnsureOriginWithArg for EnsureFounder + {} +} + +struct InputFromRng<'a, T>(&'a mut T); +impl<'a, T: RngCore> codec::Input for InputFromRng<'a, T> { + fn remaining_len(&mut self) -> Result, codec::Error> { + return Ok(None) + } + + fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { + self.0.fill_bytes(into); + Ok(()) } } -/// Pick a new PRN, in the range [0, `max`] (inclusive). -fn pick_usize(rng: &mut R, max: usize) -> usize { - (rng.next_u32() % (max as u32 + 1)) as usize +pub enum Period { + Voting { elapsed: BlockNumber, more: BlockNumber }, + Claim { elapsed: BlockNumber, more: BlockNumber }, } impl, I: 'static> Pallet { - /// Puts a bid into storage ordered by smallest to largest value. - /// Allows a maximum of 1000 bids in queue, removing largest value people first. - fn put_bid( - mut bids: Vec>>, - who: &T::AccountId, - value: BalanceOf, - bid_kind: BidKind>, - ) { - const MAX_BID_COUNT: usize = 1000; - - match bids.binary_search_by(|bid| bid.value.cmp(&value)) { - // Insert new elements after the existing ones. This ensures new bids - // with the same bid value are further down the list than existing ones. 
- Ok(pos) => { - let different_bid = bids - .iter() - // Easily extract the index we are on - .enumerate() - // Skip ahead to the suggested position - .skip(pos) - // Keep skipping ahead until the position changes - // Get the element when things changed - .find(|(_, x)| x.value > bids[pos].value); - - // If the element is not at the end of the list, insert the new element - // in the spot. - if let Some((p, _)) = different_bid { - bids.insert(p, Bid { value, who: who.clone(), kind: bid_kind }); - // If the element is at the end of the list, push the element on the end. + /// Get the period we are currently in. + fn period() -> Period> { + let claim_period = T::ClaimPeriod::get(); + let voting_period = T::VotingPeriod::get(); + let rotation_period = voting_period + claim_period; + let now = frame_system::Pallet::::block_number(); + let phase = now % rotation_period; + if phase < voting_period { + Period::Voting { elapsed: phase, more: voting_period - phase } + } else { + Period::Claim { elapsed: phase - voting_period, more: rotation_period - phase } + } + } + + /// Returns true if the given `target_round` is still in its initial voting phase. + fn in_progress(target_round: RoundIndex) -> bool { + let round = RoundCount::::get(); + target_round == round && matches!(Self::period(), Period::Voting { .. }) + } + + /// Returns the new vote. + fn do_vote(maybe_old: Option, approve: bool, rank: Rank, tally: &mut Tally) -> Vote { + match maybe_old { + Some(Vote { approve: true, weight }) => tally.approvals.saturating_reduce(weight), + Some(Vote { approve: false, weight }) => tally.rejections.saturating_reduce(weight), + _ => {}, + } + let weight_root = rank + 1; + let weight = weight_root * weight_root; + match approve { + true => tally.approvals.saturating_accrue(1), + false => tally.rejections.saturating_accrue(1), + } + Vote { approve, weight } + } + + /// Returns `true` if a punishment was given. 
+ fn check_skeptic( + candidate: &T::AccountId, + candidacy: &mut Candidacy>, + ) -> bool { + if RoundCount::::get() != candidacy.round || candidacy.skeptic_struck { + return false + } + // We expect the skeptic to have voted. + let skeptic = match Skeptic::::get() { + Some(s) => s, + None => return false, + }; + let maybe_vote = Votes::::get(&candidate, &skeptic); + let approved = candidacy.tally.clear_approval(); + let rejected = candidacy.tally.clear_rejection(); + match (maybe_vote, approved, rejected) { + (None, _, _) | + (Some(Vote { approve: true, .. }), false, true) | + (Some(Vote { approve: false, .. }), true, false) => { + // Can't do much if the punishment doesn't work out. + if Self::strike_member(&skeptic).is_ok() { + candidacy.skeptic_struck = true; + true } else { - bids.push(Bid { value, who: who.clone(), kind: bid_kind }); + false } }, - Err(pos) => bids.insert(pos, Bid { value, who: who.clone(), kind: bid_kind }), + _ => false, } - // Keep it reasonably small. - if bids.len() > MAX_BID_COUNT { - let Bid { who: popped, kind, .. } = bids.pop().expect("b.len() > 1000; qed"); - match kind { - BidKind::Deposit(deposit) => { - let err_amount = T::Currency::unreserve(&popped, deposit); - debug_assert!(err_amount.is_zero()); - }, - BidKind::Vouch(voucher, _) => { - >::remove(&voucher); + } + + /// End the current challenge period and start a new one. + fn rotate_challenge(rng: &mut impl RngCore) { + let mut next_defender = None; + let mut round = ChallengeRoundCount::::get(); + + // End current defender rotation + if let Some((defender, skeptic, tally)) = Defending::::get() { + // We require strictly more approvals, since the member should be voting for themselves. + if !tally.more_approvals() { + // Member has failed the challenge: Suspend them. This will fail if they are Head + // or Founder, in which case we ignore. + let _ = Self::suspend_member(&defender); + } + + // Check defender skeptic voted and that their vote was with the majority. 
+ let skeptic_vote = DefenderVotes::::get(round, &skeptic); + match (skeptic_vote, tally.more_approvals(), tally.more_rejections()) { + (None, _, _) | + (Some(Vote { approve: true, .. }), false, true) | + (Some(Vote { approve: false, .. }), true, false) => { + // Punish skeptic and challenge them next. + let _ = Self::strike_member(&skeptic); + let founder = Founder::::get(); + let head = Head::::get(); + if Some(&skeptic) != founder.as_ref() && Some(&skeptic) != head.as_ref() { + next_defender = Some(skeptic); + } }, + _ => {}, } - Self::deposit_event(Event::::AutoUnbid { candidate: popped }); + round.saturating_inc(); + ChallengeRoundCount::::put(round); } - >::put(bids); + // Avoid challenging if there's only two members since we never challenge the Head or + // the Founder. + if MemberCount::::get() > 2 { + let defender = next_defender + .or_else(|| Self::pick_defendent(rng)) + .expect("exited if members empty; qed"); + let skeptic = + Self::pick_member_except(rng, &defender).expect("exited if members empty; qed"); + Self::deposit_event(Event::::Challenged { member: defender.clone() }); + Defending::::put((defender, skeptic, Tally::default())); + } else { + Defending::::kill(); + } } - /// Check a user is a bid. - fn is_bid(bids: &Vec>>, who: &T::AccountId) -> bool { - // Bids are ordered by `value`, so we cannot binary search for a user. - bids.iter().any(|bid| bid.who == *who) + /// End the current intake period and begin a new one. + /// + /// --------------------------------------------- + /// #10 || #11 _ || #12 + /// || Voting | Claiming || + /// --------------------------------------------- + fn rotate_intake(rng: &mut impl RngCore) { + // We assume there's at least one member or this logic won't work. 
+ let member_count = MemberCount::::get(); + if member_count < 1 { + return + } + let maybe_head = NextHead::::take(); + if let Some(head) = maybe_head { + Head::::put(&head.who); + } + + // Bump the pot by at most `PeriodSpend`, but less if there's not very much left in our + // account. + let mut pot = Pot::::get(); + let unaccounted = T::Currency::free_balance(&Self::account_id()).saturating_sub(pot); + pot.saturating_accrue(T::PeriodSpend::get().min(unaccounted / 2u8.into())); + Pot::::put(&pot); + + // Bump round and create the new intake. + let mut round_count = RoundCount::::get(); + round_count.saturating_inc(); + let candidate_count = Self::select_new_candidates(round_count, member_count, pot); + if candidate_count > 0 { + // Select a member at random and make them the skeptic for this round. + let skeptic = Self::pick_member(rng).expect("exited if members empty; qed"); + Skeptic::::put(skeptic); + } + RoundCount::::put(round_count); } - /// Check a user is a candidate. - fn is_candidate( - candidates: &Vec>>, - who: &T::AccountId, - ) -> bool { - // Looking up a candidate is the same as looking up a bid - Self::is_bid(candidates, who) + /// Remove a selection of bidding accounts such that the total bids is no greater than `Pot` and + /// the number of bids would not surpass `MaxMembers` if all were accepted. At most one bid may + /// be zero. + /// + /// Candidates are inserted from each bidder. + /// + /// The number of candidates inserted are returned. + pub fn select_new_candidates( + round: RoundIndex, + member_count: u32, + pot: BalanceOf, + ) -> u32 { + // Get the number of left-most bidders whose bids add up to less than `pot`. 
+ let mut bids = Bids::::get(); + let params = match Parameters::::get() { + Some(params) => params, + None => return 0, + }; + let max_selections: u32 = params + .max_intake + .min(params.max_members.saturating_sub(member_count)) + .min(bids.len() as u32); + + let mut selections = 0; + // A running total of the cost to onboard these bids + let mut total_cost: BalanceOf = Zero::zero(); + + bids.retain(|bid| { + // We only accept a zero bid as the first selection. + total_cost.saturating_accrue(bid.value); + let accept = selections < max_selections && + (!bid.value.is_zero() || selections == 0) && + total_cost <= pot; + if accept { + let candidacy = Candidacy { + round, + kind: bid.kind.clone(), + bid: bid.value, + tally: Default::default(), + skeptic_struck: false, + }; + Candidates::::insert(&bid.who, candidacy); + selections.saturating_inc(); + } + !accept + }); + + // No need to reset Bids if we're not taking anything. + Bids::::put(&bids); + selections } - /// Check a user is a member. - fn is_member(members: &Vec, who: &T::AccountId) -> bool { - members.binary_search(who).is_ok() + /// Puts a bid into storage ordered by smallest to largest value. + /// Allows a maximum of 1000 bids in queue, removing largest value people first. + fn insert_bid( + bids: &mut BoundedVec>, T::MaxBids>, + who: &T::AccountId, + value: BalanceOf, + bid_kind: BidKind>, + ) { + let pos = bids.iter().position(|bid| bid.value > value).unwrap_or(bids.len()); + let r = bids.force_insert_keep_left(pos, Bid { value, who: who.clone(), kind: bid_kind }); + let maybe_discarded = match r { + Ok(x) => x, + Err(x) => Some(x), + }; + if let Some(discarded) = maybe_discarded { + Self::clean_bid(&discarded); + Self::deposit_event(Event::::AutoUnbid { candidate: discarded.who }); + } } - /// Add a member to the sorted members list. If the user is already a member, do nothing. - /// Can fail when `MaxMember` limit is reached, but has no side-effects. 
- fn add_member(who: &T::AccountId) -> DispatchResult { - let mut members = >::get(); - ensure!(members.len() < MaxMembers::::get() as usize, Error::::MaxMembers); - match members.binary_search(who) { - // Add the new member - Err(i) => { - members.insert(i, who.clone()); - T::MembershipChanged::change_members_sorted(&[who.clone()], &[], &members); - >::put(members); - Ok(()) + /// Either unreserve the deposit or free up the vouching member. + /// + /// In neither case can we do much if the action isn't completable, but there's + /// no reason that either should fail. + /// + /// WARNING: This alters the voucher item of `Members`. You must ensure that you do not + /// accidentally overwrite it with an older value after calling this. + fn clean_bid(bid: &Bid>) { + match &bid.kind { + BidKind::Deposit(deposit) => { + let err_amount = T::Currency::unreserve(&bid.who, *deposit); + debug_assert!(err_amount.is_zero()); + }, + BidKind::Vouch(voucher, _) => { + Members::::mutate_extant(voucher, |record| record.vouching = None); }, - // User is already a member, do nothing. - Ok(_) => Ok(()), } } - /// Remove a member from the members list, except the Head. + /// Either repatriate the deposit into the Society account or ban the vouching member. + /// + /// In neither case can we do much if the action isn't completable, but there's + /// no reason that either should fail. /// - /// NOTE: This does not correctly clean up a member from storage. It simply - /// removes them from the Members storage item. 
- pub fn remove_member(m: &T::AccountId) -> DispatchResult { - ensure!(Self::head() != Some(m.clone()), Error::::Head); - ensure!(Self::founder() != Some(m.clone()), Error::::Founder); - - let mut members = >::get(); - match members.binary_search(m) { - Err(_) => Err(Error::::NotMember.into()), - Ok(i) => { - members.remove(i); - T::MembershipChanged::change_members_sorted(&[], &[m.clone()], &members[..]); - >::put(members); - Ok(()) + /// WARNING: This alters the voucher item of `Members`. You must ensure that you do not + /// accidentally overwrite it with an older value after calling this. + fn reject_candidate(who: &T::AccountId, kind: &BidKind>) { + match kind { + BidKind::Deposit(deposit) => { + let pot = Self::account_id(); + let free = BalanceStatus::Free; + let r = T::Currency::repatriate_reserved(&who, &pot, *deposit, free); + debug_assert!(r.is_ok()); + }, + BidKind::Vouch(voucher, _) => { + Members::::mutate_extant(voucher, |record| { + record.vouching = Some(VouchingStatus::Banned) + }); }, } } - /// End the current period and begin a new one. - fn rotate_period(members: &mut Vec) { - let phrase = b"society_rotation"; - - let mut pot = >::get(); - - // we'll need a random seed here. - // TODO: deal with randomness freshness - // https://github.com/paritytech/substrate/issues/8312 - let (seed, _) = T::Randomness::random(phrase); - // seed needs to be guaranteed to be 32 bytes. - let seed = <[u8; 32]>::decode(&mut TrailingZeroInput::new(seed.as_ref())) - .expect("input is padded with zeroes; qed"); - let mut rng = ChaChaRng::from_seed(seed); - - // we assume there's at least one member or this logic won't work. - if !members.is_empty() { - let candidates = >::take(); - // NOTE: This may cause member length to surpass `MaxMembers`, but results in no - // consensus critical issues or side-effects. This is auto-correcting as members fall - // out of society. 
- members.reserve(candidates.len()); - - let maturity = >::block_number() + - Self::lock_duration(members.len() as u32); - - let mut rewardees = Vec::new(); - let mut total_approvals = 0; - let mut total_slash = >::zero(); - let mut total_payouts = >::zero(); - - let accepted = candidates - .into_iter() - .filter_map(|Bid { value, who: candidate, kind }| { - let mut approval_count = 0; - - // Creates a vector of (vote, member) for the given candidate - // and tallies total number of approve votes for that candidate. - let votes = members - .iter() - .filter_map(|m| >::take(&candidate, m).map(|v| (v, m))) - .inspect(|&(v, _)| { - if v == Vote::Approve { - approval_count += 1 - } - }) - .collect::>(); - - // Select one of the votes at random. - // Note that `Vote::Skeptical` and `Vote::Reject` both reject the candidate. - let is_accepted = - pick_item(&mut rng, &votes).map(|x| x.0) == Some(Vote::Approve); - - let matching_vote = if is_accepted { Vote::Approve } else { Vote::Reject }; - - let bad_vote = |m: &T::AccountId| { - // Voter voted wrong way (or was just a lazy skeptic) then reduce their - // payout and increase their strikes. after MaxStrikes then they go into - // suspension. - let amount = Self::slash_payout(m, T::WrongSideDeduction::get()); - - let strikes = >::mutate(m, |s| { - *s += 1; - *s - }); - if strikes >= T::MaxStrikes::get() { - Self::suspend_member(m); - } - amount - }; - - // Collect the voters who had a matching vote. 
- rewardees.extend( - votes - .into_iter() - .filter_map(|(v, m)| { - if v == matching_vote { - Some(m) - } else { - total_slash += bad_vote(m); - None - } - }) - .cloned(), - ); - - if is_accepted { - total_approvals += approval_count; - total_payouts += value; - members.push(candidate.clone()); - - Self::pay_accepted_candidate(&candidate, value, kind, maturity); - - // We track here the total_approvals so that every candidate has a unique - // range of numbers from 0 to `total_approvals` with length `approval_count` - // so each candidate is proportionally represented when selecting a - // "primary" below. - Some((candidate, total_approvals, value)) - } else { - // Suspend Candidate - >::insert(&candidate, (value, kind)); - Self::deposit_event(Event::::CandidateSuspended { candidate }); - None - } - }) - .collect::>(); - - // Clean up all votes. - #[allow(deprecated)] - >::remove_all(None); - - // Reward one of the voters who voted the right way. - if !total_slash.is_zero() { - if let Some(winner) = pick_item(&mut rng, &rewardees) { - // If we can't reward them, not much that can be done. - Self::bump_payout(winner, maturity, total_slash); - } else { - // Move the slashed amount back from payouts account to local treasury. - let res = T::Currency::transfer( - &Self::payouts(), - &Self::account_id(), - total_slash, - AllowDeath, - ); - debug_assert!(res.is_ok()); - } - } + /// Check a user has a bid. + fn has_bid(bids: &Vec>>, who: &T::AccountId) -> bool { + // Bids are ordered by `value`, so we cannot binary search for a user. + bids.iter().any(|bid| bid.who == *who) + } - // Fund the total payouts from the local treasury. - if !total_payouts.is_zero() { - // remove payout from pot and shift needed funds to the payout account. - pot = pot.saturating_sub(total_payouts); - - // this should never fail since we ensure we can afford the payouts in a previous - // block, but there's not much we can do to recover if it fails anyway. 
- let res = T::Currency::transfer( - &Self::account_id(), - &Self::payouts(), - total_payouts, - AllowDeath, - ); - debug_assert!(res.is_ok()); - } + /// Add a member to the members list. If the user is already a member, do nothing. Can fail when + /// `MaxMember` limit is reached, but in that case it has no side-effects. + /// + /// Set the `payouts` for the member. NOTE: This *WILL NOT RESERVE THE FUNDS TO MAKE THE + /// PAYOUT*. Only set this to be non-empty if you already have the funds reserved in the Payouts + /// account. + /// + /// NOTE: Generally you should not use this, and instead use `add_new_member` or + /// `reinstate_member`, whose names clearly match the desired intention. + fn insert_member(who: &T::AccountId, rank: Rank) -> DispatchResult { + let params = Parameters::::get().ok_or(Error::::NotGroup)?; + ensure!(MemberCount::::get() < params.max_members, Error::::MaxMembers); + let index = MemberCount::::mutate(|i| { + i.saturating_accrue(1); + *i - 1 + }); + let record = MemberRecord { rank, strikes: 0, vouching: None, index }; + Members::::insert(who, record); + MemberByIndex::::insert(index, who); + Ok(()) + } - // if at least one candidate was accepted... - if !accepted.is_empty() { - // select one as primary, randomly chosen from the accepted, weighted by approvals. - // Choose a random number between 0 and `total_approvals` - let primary_point = pick_usize(&mut rng, total_approvals - 1); - // Find the zero bid or the user who falls on that point - let primary = accepted - .iter() - .find(|e| e.2.is_zero() || e.1 > primary_point) - .expect( - "e.1 of final item == total_approvals; \ - worst case find will always return that item; qed", - ) - .0 - .clone(); - - let accounts = accepted.into_iter().map(|x| x.0).collect::>(); - - // Then write everything back out, signal the changed membership and leave an event. 
- members.sort(); - // NOTE: This may cause member length to surpass `MaxMembers`, but results in no - // consensus critical issues or side-effects. This is auto-correcting as members - // fall out of society. - >::put(&members[..]); - >::put(&primary); - - T::MembershipChanged::change_members_sorted(&accounts, &[], members); - Self::deposit_event(Event::::Inducted { primary, candidates: accounts }); - } + /// Add a member back to the members list, setting their `rank` and `payouts`. + /// + /// Can fail when `MaxMember` limit is reached, but in that case it has no side-effects. + /// + /// The `payouts` value must be exactly as it was prior to suspension since no further funds + /// will be reserved. + fn reinstate_member(who: &T::AccountId, rank: Rank) -> DispatchResult { + Self::insert_member(who, rank) + } + + /// Add a member to the members list. If the user is already a member, do nothing. Can fail when + /// `MaxMember` limit is reached, but in that case it has no side-effects. + fn add_new_member(who: &T::AccountId, rank: Rank) -> DispatchResult { + Self::insert_member(who, rank) + } - // Bump the pot by at most PeriodSpend, but less if there's not very much left in our - // account. - let unaccounted = T::Currency::free_balance(&Self::account_id()).saturating_sub(pot); - pot += T::PeriodSpend::get().min(unaccounted / 2u8.into()); + /// Induct a new member into the set. 
+ fn induct_member( + candidate: T::AccountId, + mut candidacy: Candidacy>, + rank: Rank, + ) -> DispatchResult { + Self::add_new_member(&candidate, rank)?; + Self::check_skeptic(&candidate, &mut candidacy); + + let next_head = NextHead::::get() + .filter(|old| { + old.round > candidacy.round || + old.round == candidacy.round && old.bid < candidacy.bid + }) + .unwrap_or_else(|| IntakeRecord { + who: candidate.clone(), + bid: candidacy.bid, + round: candidacy.round, + }); + NextHead::::put(next_head); + + let now = >::block_number(); + let maturity = now + Self::lock_duration(MemberCount::::get()); + Self::reward_bidder(&candidate, candidacy.bid, candidacy.kind, maturity); - >::put(&pot); + Candidates::::remove(&candidate); + Ok(()) + } + + fn strike_member(who: &T::AccountId) -> DispatchResult { + let mut record = Members::::get(who).ok_or(Error::::NotMember)?; + record.strikes.saturating_inc(); + Members::::insert(who, &record); + // ^^^ Keep the member record mutation self-contained as we might be suspending them later + // in this function. + + if record.strikes >= T::GraceStrikes::get() { + // Too many strikes: slash the payout in half. + let total_payout = Payouts::::get(who) + .payouts + .iter() + .fold(BalanceOf::::zero(), |acc, x| acc.saturating_add(x.1)); + Self::slash_payout(who, total_payout / 2u32.into()); } - // Setup the candidates for the new intake - let candidates = Self::take_selected(members.len(), pot); - >::put(&candidates); - - // Select sqrt(n) random members from the society and make them skeptics. - let pick_member = - |_| pick_item(&mut rng, &members[..]).expect("exited if members empty; qed").clone(); - let skeptics = (0..members.len().integer_sqrt()).map(pick_member).collect::>(); - skeptics.iter().for_each(|skeptic| { - for Bid { who: c, .. 
} in candidates.iter() { - >::insert(c, skeptic, Vote::Skeptic); - } - }); - Self::deposit_event(Event::::SkepticsChosen { skeptics }); + let params = Parameters::::get().ok_or(Error::::NotGroup)?; + if record.strikes >= params.max_strikes { + // Way too many strikes: suspend. + let _ = Self::suspend_member(who); + } + Ok(()) } - /// Attempt to slash the payout of some member. Return the total amount that was deducted. - fn slash_payout(who: &T::AccountId, value: BalanceOf) -> BalanceOf { - let mut rest = value; - let mut payouts = >::get(who); - if !payouts.is_empty() { - let mut dropped = 0; - for (_, amount) in payouts.iter_mut() { - if let Some(new_rest) = rest.checked_sub(amount) { - // not yet totally slashed after this one; drop it completely. - rest = new_rest; - dropped += 1; + /// Remove a member from the members list and return the candidacy. + /// + /// If the member was vouching, then this will be reset. Any bidders that the member was + /// vouching for will be cancelled unless they are already selected as candidates (in which case + /// they will be able to stand). + /// + /// If the member has existing payouts, they will be retained in the resultant `MemberRecord` + /// and the funds will remain reserved. + /// + /// The Head and the Founder may never be removed. + pub fn remove_member(m: &T::AccountId) -> Result { + ensure!(Head::::get().as_ref() != Some(m), Error::::Head); + ensure!(Founder::::get().as_ref() != Some(m), Error::::Founder); + if let Some(mut record) = Members::::get(m) { + let index = record.index; + let last_index = MemberCount::::mutate(|i| { + i.saturating_reduce(1); + *i + }); + if index != last_index { + // Move the member with the last index down to the index of the member to be + // removed. + if let Some(other) = MemberByIndex::::get(last_index) { + MemberByIndex::::insert(index, &other); + Members::::mutate(other, |m_r| { + if let Some(r) = m_r { + r.index = index + } + }); } else { - // whole slash is accounted for. 
- *amount -= rest; - rest = Zero::zero(); - break + debug_assert!(false, "ERROR: No member at the last index position?"); } } - >::insert(who, &payouts[dropped..]); + + MemberByIndex::::remove(last_index); + Members::::remove(m); + // Remove their vouching status, potentially unbanning them in the future. + if record.vouching.take() == Some(VouchingStatus::Vouching) { + // Try to remove their bid if they are vouching. + // If their vouch is already a candidate, do nothing. + Bids::::mutate(|bids| + // Try to find the matching bid + if let Some(pos) = bids.iter().position(|b| b.kind.is_vouch(&m)) { + // Remove the bid, and emit an event + let vouched = bids.remove(pos).who; + Self::deposit_event(Event::::Unvouch { candidate: vouched }); + } + ); + } + Ok(record) + } else { + Err(Error::::NotMember.into()) } - value - rest } - /// Bump the payout amount of `who`, to be unlocked at the given block number. - fn bump_payout(who: &T::AccountId, when: T::BlockNumber, value: BalanceOf) { - if !value.is_zero() { - >::mutate(who, |payouts| { - match payouts.binary_search_by_key(&when, |x| x.0) { - Ok(index) => payouts[index].1 += value, - Err(index) => payouts.insert(index, (when, value)), - } - }); + /// Remove a member from the members set and add them to the suspended members. + /// + /// If the member was vouching, then this will be reset. Any bidders that the member was + /// vouching for will be cancelled unless they are already selected as candidates (in which case + /// they will be able to stand). + fn suspend_member(who: &T::AccountId) -> DispatchResult { + let record = Self::remove_member(&who)?; + SuspendedMembers::::insert(who, record); + Self::deposit_event(Event::::MemberSuspended { member: who.clone() }); + Ok(()) + } + + /// Select a member at random, given the RNG `rng`. + /// + /// If no members exist (or the state is inconsistent), then `None` may be returned. 
+ fn pick_member(rng: &mut impl RngCore) -> Option { + let member_count = MemberCount::::get(); + if member_count == 0 { + return None } + let random_index = rng.next_u32() % member_count; + MemberByIndex::::get(random_index) } - /// Suspend a user, removing them from the member list. - fn suspend_member(who: &T::AccountId) { - if Self::remove_member(who).is_ok() { - >::insert(who, true); - >::remove(who); - Self::deposit_event(Event::::MemberSuspended { member: who.clone() }); + /// Select a member at random except `exception`, given the RNG `rng`. + /// + /// If `exception` is the only member (or the state is inconsistent), then `None` may be + /// returned. + fn pick_member_except( + rng: &mut impl RngCore, + exception: &T::AccountId, + ) -> Option { + let member_count = MemberCount::::get(); + if member_count <= 1 { + return None + } + let random_index = rng.next_u32() % (member_count - 1); + let pick = MemberByIndex::::get(random_index); + if pick.as_ref() == Some(exception) { + MemberByIndex::::get(member_count - 1) + } else { + pick + } + } + + /// Select a member who is able to defend at random, given the RNG `rng`. + /// + /// If only the Founder and Head members exist (or the state is inconsistent), then `None` + /// may be returned. + fn pick_defendent(rng: &mut impl RngCore) -> Option { + let member_count = MemberCount::::get(); + if member_count <= 2 { + return None + } + // Founder is always at index 0, so we should never pick that one. + // Head will typically but not always be the highest index. We assume it is for now and + // fix it up later if not. + let head = Head::::get(); + let pickable_count = member_count - if head.is_some() { 2 } else { 1 }; + let random_index = rng.next_u32() % pickable_count + 1; + let pick = MemberByIndex::::get(random_index); + if pick == head && head.is_some() { + // Turns out that head was not the last index since we managed to pick it. Exchange our + // pick for the last index. 
+ MemberByIndex::::get(member_count - 1) + } else { + pick } } /// Pay an accepted candidate their bid value. - fn pay_accepted_candidate( + fn reward_bidder( candidate: &T::AccountId, value: BalanceOf, kind: BidKind>, - maturity: T::BlockNumber, + maturity: BlockNumberFor, ) { let value = match kind { BidKind::Deposit(deposit) => { @@ -1567,11 +1910,17 @@ impl, I: 'static> Pallet { BidKind::Vouch(voucher, tip) => { // Check that the voucher is still vouching, else some other logic may have removed // their status. - if >::take(&voucher) == Some(VouchingStatus::Vouching) { - // In the case that a vouched-for bid is accepted we unset the - // vouching status and transfer the tip over to the voucher. - Self::bump_payout(&voucher, maturity, tip.min(value)); - value.saturating_sub(tip) + if let Some(mut record) = Members::::get(&voucher) { + if let Some(VouchingStatus::Vouching) = record.vouching { + // In the case that a vouched-for bid is accepted we unset the + // vouching status and transfer the tip over to the voucher. + record.vouching = None; + Self::bump_payout(&voucher, maturity, tip.min(value)); + Members::::insert(&voucher, record); + value.saturating_sub(tip) + } else { + value + } } else { value } @@ -1581,52 +1930,71 @@ impl, I: 'static> Pallet { Self::bump_payout(candidate, maturity, value); } - /// End the current challenge period and start a new one. - fn rotate_challenge(members: &mut Vec) { - // Assume there are members, else don't run this logic. - if !members.is_empty() { - // End current defender rotation - if let Some(defender) = Self::defender() { - let mut approval_count = 0; - let mut rejection_count = 0; - // Tallies total number of approve and reject votes for the defender. 
- members.iter().filter_map(>::take).for_each(|v| match v {
- Vote::Approve => approval_count += 1,
- _ => rejection_count += 1,
- });
-
- if approval_count <= rejection_count {
- // User has failed the challenge
- Self::suspend_member(&defender);
- *members = Self::members();
+ /// Bump the payout amount of `who`, to be unlocked at the given block number.
+ ///
+ /// It is the caller's duty to ensure that `who` is already a member. This does nothing if `who`
+ /// is not a member or if `value` is zero.
+ fn bump_payout(who: &T::AccountId, when: BlockNumberFor, value: BalanceOf) {
+ if value.is_zero() {
+ return
+ }
+ if let Some(MemberRecord { rank: 0, .. }) = Members::::get(who) {
+ Payouts::::mutate(who, |record| {
+ // Members of rank 1 never get payouts.
+ match record.payouts.binary_search_by_key(&when, |x| x.0) {
+ Ok(index) => record.payouts[index].1.saturating_accrue(value),
+ Err(index) => {
+ // If they have too many pending payouts, then we discard the payment.
+ let _ = record.payouts.try_insert(index, (when, value));
+ },
 }
+ });
+ Self::reserve_payout(value);
+ }
+ }

- // Clean up all votes.
- #[allow(deprecated)]
- >::remove_all(None);
- }
-
- // Avoid challenging if there's only two members since we never challenge the Head or
- // the Founder.
- if members.len() > 2 {
- // Start a new defender rotation
- let phrase = b"society_challenge";
- // we'll need a random seed here.
- // TODO: deal with randomness freshness
- // https://github.com/paritytech/substrate/issues/8312
- let (seed, _) = T::Randomness::random(phrase);
- // seed needs to be guaranteed to be 32 bytes. 
- let seed = <[u8; 32]>::decode(&mut TrailingZeroInput::new(seed.as_ref()))
- .expect("input is padded with zeroes; qed");
- let mut rng = ChaChaRng::from_seed(seed);
- let chosen = pick_item(&mut rng, &members[1..members.len() - 1])
- .expect("exited if members empty; qed");
- >::put(&chosen);
- Self::deposit_event(Event::::Challenged { member: chosen.clone() });
+ /// Attempt to slash the payout of some member. Return the total amount that was deducted.
+ fn slash_payout(who: &T::AccountId, value: BalanceOf) -> BalanceOf {
+ let mut record = Payouts::::get(who);
+ let mut rest = value;
+ while !record.payouts.is_empty() {
+ if let Some(new_rest) = rest.checked_sub(&record.payouts[0].1) {
+ // not yet totally slashed after this one; drop it completely.
+ rest = new_rest;
+ record.payouts.remove(0);
} else {
- >::kill();
+ // whole slash is accounted for.
+ record.payouts[0].1.saturating_reduce(rest);
+ rest = Zero::zero();
+ break
}
}
+ Payouts::::insert(who, record);
+ value - rest
+ }
+
+ /// Transfer some `amount` from the main account into the payouts account and reduce the Pot
+ /// by this amount.
+ fn reserve_payout(amount: BalanceOf) {
+ // Transfer payout from the Pot into the payouts account.
+ Pot::::mutate(|pot| pot.saturating_reduce(amount));
+
+ // this should never fail since we ensure we can afford the payouts in a previous
+ // block, but there's not much we can do to recover if it fails anyway.
+ let res = T::Currency::transfer(&Self::account_id(), &Self::payouts(), amount, AllowDeath);
+ debug_assert!(res.is_ok());
+ }
+
+ /// Transfer some `amount` from the payouts account back into the main account and increase the
+ /// Pot by this amount.
+ fn unreserve_payout(amount: BalanceOf) {
+ // Transfer payout from the payouts account back into the Pot. 
+ Pot::::mutate(|pot| pot.saturating_accrue(amount)); + + // this should never fail since we ensure we can afford the payouts in a previous + // block, but there's not much we can do to recover if it fails anyway. + let res = T::Currency::transfer(&Self::payouts(), &Self::account_id(), amount, AllowDeath); + debug_assert!(res.is_ok()); } /// The account ID of the treasury pot. @@ -1649,74 +2017,10 @@ impl, I: 'static> Pallet { /// /// This is a rather opaque calculation based on the formula here: /// https://www.desmos.com/calculator/9itkal1tce - fn lock_duration(x: u32) -> T::BlockNumber { + fn lock_duration(x: u32) -> BlockNumberFor { let lock_pc = 100 - 50_000 / (x + 500); Percent::from_percent(lock_pc as u8) * T::MaxLockDuration::get() } - - /// Get a selection of bidding accounts such that the total bids is no greater than `Pot` and - /// the number of bids would not surpass `MaxMembers` if all were accepted. - /// - /// May be empty. - pub fn take_selected( - members_len: usize, - pot: BalanceOf, - ) -> Vec>> { - let max_members = MaxMembers::::get() as usize; - let mut max_selections: usize = - (T::MaxCandidateIntake::get() as usize).min(max_members.saturating_sub(members_len)); - - if max_selections > 0 { - // Get the number of left-most bidders whose bids add up to less than `pot`. - let mut bids = >::get(); - - // The list of selected candidates - let mut selected = Vec::new(); - - if bids.len() > 0 { - // Can only select at most the length of bids - max_selections = max_selections.min(bids.len()); - // Number of selected bids so far - let mut count = 0; - // Check if we have already selected a candidate with zero bid - let mut zero_selected = false; - // A running total of the cost to onboard these bids - let mut total_cost: BalanceOf = Zero::zero(); - - bids.retain(|bid| { - if count < max_selections { - // Handle zero bids. We only want one of them. 
- if bid.value.is_zero() { - // Select only the first zero bid - if !zero_selected { - selected.push(bid.clone()); - zero_selected = true; - count += 1; - return false - } - } else { - total_cost += bid.value; - // Select only as many users as the pot can support. - if total_cost <= pot { - selected.push(bid.clone()); - count += 1; - return false - } - } - } - true - }); - - // No need to reset Bids if we're not taking anything. - if count > 0 { - >::put(bids); - } - } - selected - } else { - vec![] - } - } } impl, I: 'static> OnUnbalanced> for Pallet { diff --git a/frame/society/src/migrations.rs b/frame/society/src/migrations.rs new file mode 100644 index 0000000000000..4685167dcbcfd --- /dev/null +++ b/frame/society/src/migrations.rs @@ -0,0 +1,358 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Migrations for Society Pallet + +use super::*; +use codec::{Decode, Encode}; +use frame_support::traits::{Defensive, DefensiveOption, Instance, OnRuntimeUpgrade}; + +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + +/// The log target. +const TARGET: &'static str = "runtime::society::migration"; + +/// This migration moves all the state to v2 of Society. 
+pub struct VersionUncheckedMigrateToV2, I: 'static, PastPayouts>( + sp_std::marker::PhantomData<(T, I, PastPayouts)>, +); + +impl< + T: Config, + I: Instance + 'static, + PastPayouts: Get::AccountId, BalanceOf)>>, + > OnRuntimeUpgrade for VersionUncheckedMigrateToV2 +{ + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + let current = Pallet::::current_storage_version(); + let onchain = Pallet::::on_chain_storage_version(); + ensure!(onchain == 0 && current == 2, "pallet_society: invalid version"); + + Ok((old::Candidates::::get(), old::Members::::get()).encode()) + } + + fn on_runtime_upgrade() -> Weight { + let onchain = Pallet::::on_chain_storage_version(); + if onchain < 2 { + log::info!( + target: TARGET, + "Running migration against onchain version {:?}", + onchain + ); + from_original::(&mut PastPayouts::get()).defensive_unwrap_or(Weight::MAX) + } else { + log::warn!("Unexpected onchain version: {:?} (expected 0)", onchain); + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(data: Vec) -> Result<(), TryRuntimeError> { + let old: ( + Vec::AccountId, BalanceOf>>, + Vec<::AccountId>, + ) = Decode::decode(&mut &data[..]).expect("Bad data"); + let mut old_candidates = + old.0.into_iter().map(|x| (x.who, x.kind, x.value)).collect::>(); + let mut old_members = old.1; + let mut candidates = + Candidates::::iter().map(|(k, v)| (k, v.kind, v.bid)).collect::>(); + let mut members = Members::::iter_keys().collect::>(); + + old_candidates.sort_by_key(|x| x.0.clone()); + candidates.sort_by_key(|x| x.0.clone()); + assert_eq!(candidates, old_candidates); + + members.sort(); + old_members.sort(); + assert_eq!(members, old_members); + + ensure!( + Pallet::::on_chain_storage_version() == 2, + "The onchain version must be updated after the migration." 
+ ); + + assert_internal_consistency::(); + Ok(()) + } +} + +/// [`VersionUncheckedMigrateToV2`] wrapped in a +/// [`frame_support::migrations::VersionedRuntimeUpgrade`], ensuring the migration is only performed +/// when on-chain version is 0. +#[cfg(feature = "experimental")] +pub type VersionCheckedMigrateToV2 = + frame_support::migrations::VersionedRuntimeUpgrade< + 0, + 2, + VersionUncheckedMigrateToV2, + crate::pallet::Pallet, + ::DbWeight, + >; + +pub(crate) mod old { + use super::*; + use frame_support::storage_alias; + + /// A vote by a member on a candidate application. + #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] + pub enum Vote { + /// The member has been chosen to be skeptic and has not yet taken any action. + Skeptic, + /// The member has rejected the candidate's application. + Reject, + /// The member approves of the candidate's application. + Approve, + } + + #[storage_alias] + pub type Bids, I: 'static> = StorageValue< + Pallet, + Vec::AccountId, BalanceOf>>, + ValueQuery, + >; + #[storage_alias] + pub type Candidates, I: 'static> = StorageValue< + Pallet, + Vec::AccountId, BalanceOf>>, + ValueQuery, + >; + #[storage_alias] + pub type Votes, I: 'static> = StorageDoubleMap< + Pallet, + Twox64Concat, + ::AccountId, + Twox64Concat, + ::AccountId, + Vote, + >; + #[storage_alias] + pub type SuspendedCandidates, I: 'static> = StorageMap< + Pallet, + Twox64Concat, + ::AccountId, + (BalanceOf, BidKind<::AccountId, BalanceOf>), + >; + #[storage_alias] + pub type Members, I: 'static> = + StorageValue, Vec<::AccountId>, ValueQuery>; + #[storage_alias] + pub type Vouching, I: 'static> = StorageMap< + Pallet, + Twox64Concat, + ::AccountId, + VouchingStatus, + >; + #[storage_alias] + pub type Strikes, I: 'static> = StorageMap< + Pallet, + Twox64Concat, + ::AccountId, + StrikeCount, + ValueQuery, + >; + #[storage_alias] + pub type Payouts, I: 'static> = StorageMap< + Pallet, + Twox64Concat, + ::AccountId, + 
Vec<(frame_system::pallet_prelude::BlockNumberFor, BalanceOf)>, + ValueQuery, + >; + #[storage_alias] + pub type SuspendedMembers, I: 'static> = StorageMap< + Pallet, + Twox64Concat, + ::AccountId, + bool, + ValueQuery, + >; + #[storage_alias] + pub type Defender, I: 'static> = + StorageValue, ::AccountId>; + #[storage_alias] + pub type DefenderVotes, I: 'static> = + StorageMap, Twox64Concat, ::AccountId, Vote>; +} + +/// Will panic if there are any inconsistencies in the pallet's state or old keys remaining. +pub fn assert_internal_consistency, I: Instance + 'static>() { + // Check all members are valid data. + let mut members = vec![]; + for m in Members::::iter_keys() { + let r = Members::::get(&m).expect("Member data must be valid"); + members.push((m, r)); + } + assert_eq!(MemberCount::::get(), members.len() as u32); + for (who, record) in members.iter() { + assert_eq!(MemberByIndex::::get(record.index).as_ref(), Some(who)); + } + if let Some(founder) = Founder::::get() { + assert_eq!(Members::::get(founder).expect("founder is member").index, 0); + } + if let Some(head) = Head::::get() { + assert!(Members::::contains_key(head)); + } + // Check all votes are valid data. + for (k1, k2) in Votes::::iter_keys() { + assert!(Votes::::get(k1, k2).is_some()); + } + // Check all defender votes are valid data. + for (k1, k2) in DefenderVotes::::iter_keys() { + assert!(DefenderVotes::::get(k1, k2).is_some()); + } + // Check all candidates are valid data. + for k in Candidates::::iter_keys() { + assert!(Candidates::::get(k).is_some()); + } + // Check all suspended members are valid data. + for m in SuspendedMembers::::iter_keys() { + assert!(SuspendedMembers::::get(m).is_some()); + } + // Check all payouts are valid data. 
+ for p in Payouts::::iter_keys() { + let k = Payouts::::hashed_key_for(&p); + let v = frame_support::storage::unhashed::get_raw(&k[..]).expect("value is in map"); + assert!(PayoutRecordFor::::decode(&mut &v[..]).is_ok()); + } + + // We don't use these - make sure they don't exist. + assert_eq!(old::SuspendedCandidates::::iter().count(), 0); + assert_eq!(old::Strikes::::iter().count(), 0); + assert_eq!(old::Vouching::::iter().count(), 0); + assert!(!old::Defender::::exists()); + assert!(!old::Members::::exists()); +} + +pub fn from_original, I: Instance + 'static>( + past_payouts: &mut [(::AccountId, BalanceOf)], +) -> Result { + // Migrate Bids from old::Bids (just a trunctation). + Bids::::put(BoundedVec::<_, T::MaxBids>::truncate_from(old::Bids::::take())); + + // Initialise round counter. + RoundCount::::put(0); + + // Migrate Candidates from old::Candidates + for Bid { who: candidate, kind, value } in old::Candidates::::take().into_iter() { + let mut tally = Tally::default(); + // Migrate Votes from old::Votes + // No need to drain, since we're overwriting values. 
+ for (voter, vote) in old::Votes::::iter_prefix(&candidate) { + Votes::::insert( + &candidate, + &voter, + Vote { approve: vote == old::Vote::Approve, weight: 1 }, + ); + match vote { + old::Vote::Approve => tally.approvals.saturating_inc(), + old::Vote::Reject => tally.rejections.saturating_inc(), + old::Vote::Skeptic => Skeptic::::put(&voter), + } + } + Candidates::::insert( + &candidate, + Candidacy { round: 0, kind, tally, skeptic_struck: false, bid: value }, + ); + } + + // Migrate Members from old::Members old::Strikes old::Vouching + let mut member_count = 0; + for member in old::Members::::take() { + let strikes = old::Strikes::::take(&member); + let vouching = old::Vouching::::take(&member); + let record = MemberRecord { index: member_count, rank: 0, strikes, vouching }; + Members::::insert(&member, record); + MemberByIndex::::insert(member_count, &member); + + // The founder must be the first member in Society V2. If we find the founder not in index + // zero, we swap it with the first member. + if member == Founder::::get().defensive_ok_or("founder must always be set")? && + member_count > 0 + { + let member_to_swap = MemberByIndex::::get(0) + .defensive_ok_or("member_count > 0, we must have at least 1 member")?; + // Swap the founder with the first member in MemberByIndex. + MemberByIndex::::swap(0, member_count); + // Update the indicies of the swapped member MemberRecords. 
+ Members::::mutate(&member, |m| { + if let Some(member) = m { + member.index = 0; + } else { + frame_support::defensive!( + "Member somehow disapeared from storage after it was inserted" + ); + } + }); + Members::::mutate(&member_to_swap, |m| { + if let Some(member) = m { + member.index = member_count; + } else { + frame_support::defensive!( + "Member somehow disapeared from storage after it was queried" + ); + } + }); + } + member_count.saturating_inc(); + } + MemberCount::::put(member_count); + + // Migrate Payouts from: old::Payouts and raw info (needed since we can't query old chain + // state). + past_payouts.sort(); + for (who, mut payouts) in old::Payouts::::iter() { + payouts.truncate(T::MaxPayouts::get() as usize); + // ^^ Safe since we already truncated. + let paid = past_payouts + .binary_search_by_key(&&who, |x| &x.0) + .ok() + .map(|p| past_payouts[p].1) + .unwrap_or(Zero::zero()); + match BoundedVec::try_from(payouts) { + Ok(payouts) => Payouts::::insert(who, PayoutRecord { paid, payouts }), + Err(_) => debug_assert!(false, "Truncation of Payouts ineffective??"), + } + } + + // Migrate SuspendedMembers from old::SuspendedMembers old::Strikes old::Vouching. + for who in old::SuspendedMembers::::iter_keys() { + let strikes = old::Strikes::::take(&who); + let vouching = old::Vouching::::take(&who); + let record = MemberRecord { index: 0, rank: 0, strikes, vouching }; + SuspendedMembers::::insert(&who, record); + } + + // Any suspended candidates remaining are rejected. + let _ = old::SuspendedCandidates::::clear(u32::MAX, None); + + // We give the current defender the benefit of the doubt. 
+ old::Defender::::kill(); + let _ = old::DefenderVotes::::clear(u32::MAX, None); + + Ok(T::BlockWeights::get().max_block) +} + +pub fn from_raw_past_payouts, I: Instance + 'static>( + past_payouts_raw: impl Iterator, +) -> Vec<(::AccountId, BalanceOf)> { + past_payouts_raw + .filter_map(|(x, y)| Some((Decode::decode(&mut &x[..]).ok()?, y.try_into().ok()?))) + .collect() +} diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index 9f72febc2106e..a318c2e794b7a 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -21,27 +21,25 @@ use super::*; use crate as pallet_society; use frame_support::{ - ord_parameter_types, parameter_types, + assert_noop, assert_ok, ord_parameter_types, parameter_types, traits::{ConstU32, ConstU64}, }; use frame_support_test::TestRandomness; use frame_system::EnsureSignedBy; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +use RuntimeOrigin as Origin; + type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Society: pallet_society::{Pallet, Call, Storage, Event, Config}, } @@ -52,8 +50,12 @@ parameter_types! { } ord_parameter_types! 
{ + pub const ChallengePeriod: u64 = 8; + pub const ClaimPeriod: u64 = 1; pub const FounderSetAccount: u128 = 1; pub const SuspensionJudgementSetAccount: u128 = 2; + pub const MaxPayouts: u32 = 10; + pub const MaxBids: u32 = 10; } impl frame_system::Config for Test { @@ -62,14 +64,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u128; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -95,40 +96,37 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } impl Config for Test { type RuntimeEvent = RuntimeEvent; + type PalletId = SocietyPalletId; type Currency = pallet_balances::Pallet; type Randomness = TestRandomness; - type CandidateDeposit = ConstU64<25>; - type WrongSideDeduction = ConstU64<2>; - type MaxStrikes = ConstU32<2>; + type GraceStrikes = ConstU32<1>; type PeriodSpend = ConstU64<1000>; - type MembershipChanged = (); - type RotationPeriod = ConstU64<4>; + type VotingPeriod = ConstU64<3>; + type ClaimPeriod = ClaimPeriod; type MaxLockDuration = ConstU64<100>; type FounderSetOrigin = EnsureSignedBy; - type SuspensionJudgementOrigin = EnsureSignedBy; - type ChallengePeriod = ConstU64<8>; - type MaxCandidateIntake = ConstU32<10>; - type PalletId = SocietyPalletId; + type ChallengePeriod = ChallengePeriod; + type MaxPayouts = MaxPayouts; + type MaxBids = MaxBids; + type WeightInfo = (); } pub struct EnvBuilder { - members: Vec, balance: u64, balances: Vec<(u128, u64)>, pot: u64, - max_members: u32, + founded: bool, } impl EnvBuilder { pub fn new() -> Self { Self { - members: 
vec![10], balance: 10_000, balances: vec![ (10, 50), @@ -142,49 +140,32 @@ impl EnvBuilder { (90, 50), ], pot: 0, - max_members: 100, + founded: true, } } pub fn execute R>(mut self, f: F) -> R { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); self.balances.push((Society::account_id(), self.balance.max(self.pot))); pallet_balances::GenesisConfig:: { balances: self.balances } .assimilate_storage(&mut t) .unwrap(); - pallet_society::GenesisConfig:: { - members: self.members, - pot: self.pot, - max_members: self.max_members, - } - .assimilate_storage(&mut t) - .unwrap(); + pallet_society::GenesisConfig:: { pot: self.pot } + .assimilate_storage(&mut t) + .unwrap(); let mut ext: sp_io::TestExternalities = t.into(); - ext.execute_with(f) - } - #[allow(dead_code)] - pub fn with_members(mut self, m: Vec) -> Self { - self.members = m; - self - } - #[allow(dead_code)] - pub fn with_balances(mut self, b: Vec<(u128, u64)>) -> Self { - self.balances = b; - self - } - #[allow(dead_code)] - pub fn with_pot(mut self, p: u64) -> Self { - self.pot = p; - self - } - #[allow(dead_code)] - pub fn with_balance(mut self, b: u64) -> Self { - self.balance = b; - self + ext.execute_with(|| { + if self.founded { + let r = b"be cool".to_vec(); + assert!(Society::found_society(Origin::signed(1), 10, 10, 8, 2, 25, r).is_ok()); + } + let r = f(); + migrations::assert_internal_consistency::(); + r + }) } - #[allow(dead_code)] - pub fn with_max_members(mut self, n: u32) -> Self { - self.max_members = n; + pub fn founded(mut self, f: bool) -> Self { + self.founded = f; self } } @@ -202,10 +183,121 @@ pub fn run_to_block(n: u64) { } /// Creates a bid struct using input parameters. -pub fn create_bid( - value: Balance, +pub fn bid( who: AccountId, kind: BidKind, + value: Balance, ) -> Bid { Bid { who, kind, value } } + +/// Creates a candidate struct using input parameters. 
+pub fn candidacy( + round: RoundIndex, + bid: Balance, + kind: BidKind, + approvals: VoteCount, + rejections: VoteCount, +) -> Candidacy { + Candidacy { round, kind, bid, tally: Tally { approvals, rejections }, skeptic_struck: false } +} + +pub fn next_challenge() { + let challenge_period: u64 = ::ChallengePeriod::get(); + let now = System::block_number(); + run_to_block(now + challenge_period - now % challenge_period); +} + +pub fn next_voting() { + if let Period::Voting { more, .. } = Society::period() { + run_to_block(System::block_number() + more); + } +} + +pub fn conclude_intake(allow_resignation: bool, judge_intake: Option) { + next_voting(); + let round = RoundCount::::get(); + for (who, candidacy) in Candidates::::iter() { + if candidacy.tally.clear_approval() { + assert_ok!(Society::claim_membership(Origin::signed(who))); + assert_noop!( + Society::claim_membership(Origin::signed(who)), + Error::::NotCandidate + ); + continue + } + if candidacy.tally.clear_rejection() && allow_resignation { + assert_noop!( + Society::claim_membership(Origin::signed(who)), + Error::::NotApproved + ); + assert_ok!(Society::resign_candidacy(Origin::signed(who))); + continue + } + if let (Some(founder), Some(approve)) = (Founder::::get(), judge_intake) { + if !candidacy.tally.clear_approval() && !approve { + // can be rejected by founder + assert_ok!(Society::kick_candidate(Origin::signed(founder), who)); + continue + } + if !candidacy.tally.clear_rejection() && approve { + // can be rejected by founder + assert_ok!(Society::bestow_membership(Origin::signed(founder), who)); + continue + } + } + if candidacy.tally.clear_rejection() && round > candidacy.round + 1 { + assert_noop!( + Society::claim_membership(Origin::signed(who)), + Error::::NotApproved + ); + assert_ok!(Society::drop_candidate(Origin::signed(0), who)); + assert_noop!( + Society::drop_candidate(Origin::signed(0), who), + Error::::NotCandidate + ); + continue + } + if !candidacy.skeptic_struck { + 
assert_ok!(Society::punish_skeptic(Origin::signed(who))); + } + } +} + +pub fn next_intake() { + let claim_period: u64 = ::ClaimPeriod::get(); + match Society::period() { + Period::Voting { more, .. } => run_to_block(System::block_number() + more + claim_period), + Period::Claim { more, .. } => run_to_block(System::block_number() + more), + } +} + +pub fn place_members(members: impl AsRef<[u128]>) { + for who in members.as_ref() { + assert_ok!(Society::insert_member(who, 0)); + } +} + +pub fn members() -> Vec { + let mut r = Members::::iter_keys().collect::>(); + r.sort(); + r +} + +pub fn membership() -> Vec<(u128, MemberRecord)> { + let mut r = Members::::iter().collect::>(); + r.sort_by_key(|x| x.0); + r +} + +pub fn candidacies() -> Vec<(u128, Candidacy)> { + let mut r = Candidates::::iter().collect::>(); + r.sort_by_key(|x| x.0); + r +} + +pub fn candidates() -> Vec { + let mut r = Candidates::::iter_keys().collect::>(); + r.sort(); + r +} diff --git a/frame/society/src/tests.rs b/frame/society/src/tests.rs index 8a5c626dea5b7..ea2afef3b32b5 100644 --- a/frame/society/src/tests.rs +++ b/frame/society/src/tests.rs @@ -18,65 +18,196 @@ //! Tests for the module. use super::*; +use migrations::old; use mock::*; use frame_support::{assert_noop, assert_ok}; use sp_core::blake2_256; use sp_runtime::traits::BadOrigin; +use BidKind::*; +use VouchingStatus::*; + +use RuntimeOrigin as Origin; + +#[test] +fn migration_works() { + EnvBuilder::new().founded(false).execute(|| { + use old::Vote::*; + + // Initialise the old storage items. 
+ Founder::::put(10); + Head::::put(30); + old::Members::::put(vec![10, 20, 30]); + old::Vouching::::insert(30, Vouching); + old::Vouching::::insert(40, Banned); + old::Strikes::::insert(20, 1); + old::Strikes::::insert(30, 2); + old::Strikes::::insert(40, 5); + old::Payouts::::insert(20, vec![(1, 1)]); + old::Payouts::::insert( + 30, + (0..=::MaxPayouts::get()) + .map(|i| (i as u64, i as u64)) + .collect::>(), + ); + old::SuspendedMembers::::insert(40, true); + + old::Defender::::put(20); + old::DefenderVotes::::insert(10, Approve); + old::DefenderVotes::::insert(20, Approve); + old::DefenderVotes::::insert(30, Reject); + + old::SuspendedCandidates::::insert(50, (10, Deposit(100))); + + old::Candidates::::put(vec![ + Bid { who: 60, kind: Deposit(100), value: 200 }, + Bid { who: 70, kind: Vouch(30, 30), value: 100 }, + ]); + old::Votes::::insert(60, 10, Approve); + old::Votes::::insert(70, 10, Reject); + old::Votes::::insert(70, 20, Approve); + old::Votes::::insert(70, 30, Approve); + + let bids = (0..=::MaxBids::get()) + .map(|i| Bid { + who: 100u128 + i as u128, + kind: Deposit(20u64 + i as u64), + value: 10u64 + i as u64, + }) + .collect::>(); + old::Bids::::put(bids); + + migrations::from_original::(&mut [][..]).expect("migration failed"); + migrations::assert_internal_consistency::(); + + assert_eq!( + membership(), + vec![ + (10, MemberRecord { rank: 0, strikes: 0, vouching: None, index: 0 }), + (20, MemberRecord { rank: 0, strikes: 1, vouching: None, index: 1 }), + (30, MemberRecord { rank: 0, strikes: 2, vouching: Some(Vouching), index: 2 }), + ] + ); + assert_eq!(Payouts::::get(10), PayoutRecord::default()); + let payouts = vec![(1, 1)].try_into().unwrap(); + assert_eq!(Payouts::::get(20), PayoutRecord { paid: 0, payouts }); + let payouts = (0..::MaxPayouts::get()) + .map(|i| (i as u64, i as u64)) + .collect::>() + .try_into() + .unwrap(); + assert_eq!(Payouts::::get(30), PayoutRecord { paid: 0, payouts }); + assert_eq!( + 
SuspendedMembers::::iter().collect::>(), + vec![(40, MemberRecord { rank: 0, strikes: 5, vouching: Some(Banned), index: 0 }),] + ); + let bids: BoundedVec<_, ::MaxBids> = (0..::MaxBids::get()) + .map(|i| Bid { + who: 100u128 + i as u128, + kind: Deposit(20u64 + i as u64), + value: 10u64 + i as u64, + }) + .collect::>() + .try_into() + .unwrap(); + assert_eq!(Bids::::get(), bids); + assert_eq!(RoundCount::::get(), 0); + assert_eq!( + candidacies(), + vec![ + ( + 60, + Candidacy { + round: 0, + kind: Deposit(100), + bid: 200, + tally: Tally { approvals: 1, rejections: 0 }, + skeptic_struck: false, + } + ), + ( + 70, + Candidacy { + round: 0, + kind: Vouch(30, 30), + bid: 100, + tally: Tally { approvals: 2, rejections: 1 }, + skeptic_struck: false, + } + ), + ] + ); + assert_eq!(Votes::::get(60, 10), Some(Vote { approve: true, weight: 1 })); + assert_eq!(Votes::::get(70, 10), Some(Vote { approve: false, weight: 1 })); + assert_eq!(Votes::::get(70, 20), Some(Vote { approve: true, weight: 1 })); + assert_eq!(Votes::::get(70, 30), Some(Vote { approve: true, weight: 1 })); + }); +} #[test] fn founding_works() { - EnvBuilder::new().with_max_members(0).with_members(vec![]).execute(|| { + EnvBuilder::new().founded(false).execute(|| { // Not set up initially. 
- assert_eq!(Society::founder(), None); - assert_eq!(Society::max_members(), 0); - assert_eq!(Society::pot(), 0); + assert_eq!(Founder::::get(), None); + assert_eq!(Parameters::::get(), None); + assert_eq!(Pot::::get(), 0); // Account 1 is set as the founder origin // Account 5 cannot start a society - assert_noop!(Society::found(RuntimeOrigin::signed(5), 20, 100, vec![]), BadOrigin); + assert_noop!( + Society::found_society(Origin::signed(5), 20, 100, 10, 2, 25, vec![]), + BadOrigin + ); // Account 1 can start a society, where 10 is the founding member - assert_ok!(Society::found(RuntimeOrigin::signed(1), 10, 100, b"be cool".to_vec())); + assert_ok!(Society::found_society( + Origin::signed(1), + 10, + 100, + 10, + 2, + 25, + b"be cool".to_vec() + )); // Society members only include 10 - assert_eq!(Society::members(), vec![10]); + assert_eq!(members(), vec![10]); // 10 is the head of the society - assert_eq!(Society::head(), Some(10)); + assert_eq!(Head::::get(), Some(10)); // ...and also the founder - assert_eq!(Society::founder(), Some(10)); + assert_eq!(Founder::::get(), Some(10)); // 100 members max - assert_eq!(Society::max_members(), 100); + assert_eq!(Parameters::::get().unwrap().max_members, 100); // rules are correct - assert_eq!(Society::rules(), Some(blake2_256(b"be cool").into())); + assert_eq!(Rules::::get(), Some(blake2_256(b"be cool").into())); // Pot grows after first rotation period - run_to_block(4); - assert_eq!(Society::pot(), 1000); + next_intake(); + assert_eq!(Pot::::get(), 1000); // Cannot start another society assert_noop!( - Society::found(RuntimeOrigin::signed(1), 20, 100, vec![]), - Error::::AlreadyFounded + Society::found_society(Origin::signed(1), 20, 100, 10, 2, 25, vec![]), + Error::::AlreadyFounded ); }); } #[test] fn unfounding_works() { - EnvBuilder::new().with_max_members(0).with_members(vec![]).execute(|| { + EnvBuilder::new().founded(false).execute(|| { // Account 1 sets the founder... 
- assert_ok!(Society::found(RuntimeOrigin::signed(1), 10, 100, vec![])); + assert_ok!(Society::found_society(Origin::signed(1), 10, 100, 10, 2, 25, vec![])); // Account 2 cannot unfound it as it's not the founder. - assert_noop!(Society::unfound(RuntimeOrigin::signed(2)), Error::::NotFounder); + assert_noop!(Society::dissolve(Origin::signed(2)), Error::::NotFounder); // Account 10 can, though. - assert_ok!(Society::unfound(RuntimeOrigin::signed(10))); + assert_ok!(Society::dissolve(Origin::signed(10))); // 1 sets the founder to 20 this time - assert_ok!(Society::found(RuntimeOrigin::signed(1), 20, 100, vec![])); + assert_ok!(Society::found_society(Origin::signed(1), 20, 100, 10, 2, 25, vec![])); // Bring in a new member... - assert_ok!(Society::bid(RuntimeOrigin::signed(10), 0)); - run_to_block(4); - assert_ok!(Society::vote(RuntimeOrigin::signed(20), 10, true)); - run_to_block(8); + assert_ok!(Society::bid(Origin::signed(10), 0)); + next_intake(); + assert_ok!(Society::vote(Origin::signed(20), 10, true)); + conclude_intake(true, None); // Unfounding won't work now, even though it's from 20. 
- assert_noop!(Society::unfound(RuntimeOrigin::signed(20)), Error::::NotHead); + assert_noop!(Society::dissolve(Origin::signed(20)), Error::::NotHead); }); } @@ -89,15 +220,16 @@ fn basic_new_member_works() { assert_eq!(Balances::free_balance(20), 25); assert_eq!(Balances::reserved_balance(20), 25); // Rotate period every 4 blocks - run_to_block(4); + next_intake(); // 20 is now a candidate - assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); + assert_eq!(candidacies(), vec![(20, candidacy(1, 0, Deposit(25), 0, 0))]); // 10 (a member) can vote for the candidate - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); + assert_ok!(Society::vote(Origin::signed(10), 20, true)); + conclude_intake(true, None); // Rotate period every 4 blocks - run_to_block(8); + next_intake(); // 20 is now a member of the society - assert_eq!(Society::members(), vec![10, 20]); + assert_eq!(members(), vec![10, 20]); // Reserved balance is returned assert_eq!(Balances::free_balance(20), 50); assert_eq!(Balances::reserved_balance(20), 0); @@ -113,57 +245,60 @@ fn bidding_works() { assert_ok!(Society::bid(RuntimeOrigin::signed(40), 400)); assert_ok!(Society::bid(RuntimeOrigin::signed(30), 300)); // Rotate period - run_to_block(4); + next_intake(); // Pot is 1000 after "PeriodSpend" - assert_eq!(Society::pot(), 1000); + assert_eq!(Pot::::get(), 1000); assert_eq!(Balances::free_balance(Society::account_id()), 10_000); // Choose smallest bidding users whose total is less than pot assert_eq!( - Society::candidates(), + candidacies(), vec![ - create_bid(300, 30, BidKind::Deposit(25)), - create_bid(400, 40, BidKind::Deposit(25)), + (30, candidacy(1, 300, Deposit(25), 0, 0)), + (40, candidacy(1, 400, Deposit(25), 0, 0)), ] ); // A member votes for these candidates to join the society - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 30, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 40, true)); - run_to_block(8); + 
assert_ok!(Society::vote(Origin::signed(10), 30, true)); + assert_ok!(Society::vote(Origin::signed(10), 40, true)); + conclude_intake(true, None); + next_intake(); // Candidates become members after a period rotation - assert_eq!(Society::members(), vec![10, 30, 40]); + assert_eq!(members(), vec![10, 30, 40]); // Pot is increased by 1000, but pays out 700 to the members assert_eq!(Balances::free_balance(Society::account_id()), 9_300); - assert_eq!(Society::pot(), 1_300); + assert_eq!(Pot::::get(), 1_300); // Left over from the original bids is 50 who satisfies the condition of bid less than pot. - assert_eq!(Society::candidates(), vec![create_bid(500, 50, BidKind::Deposit(25))]); + assert_eq!(candidacies(), vec![(50, candidacy(2, 500, Deposit(25), 0, 0))]); // 40, now a member, can vote for 50 - assert_ok!(Society::vote(RuntimeOrigin::signed(40), 50, true)); + assert_ok!(Society::vote(Origin::signed(40), 50, true)); + conclude_intake(true, None); run_to_block(12); // 50 is now a member - assert_eq!(Society::members(), vec![10, 30, 40, 50]); + assert_eq!(members(), vec![10, 30, 40, 50]); // Pot is increased by 1000, and 500 is paid out. Total payout so far is 1200. 
- assert_eq!(Society::pot(), 1_800); + assert_eq!(Pot::::get(), 1_800); assert_eq!(Balances::free_balance(Society::account_id()), 8_800); // No more candidates satisfy the requirements - assert_eq!(Society::candidates(), vec![]); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); // Keep defender around - // Next period + assert_eq!(candidacies(), vec![]); + assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around + // Next period run_to_block(16); // Same members - assert_eq!(Society::members(), vec![10, 30, 40, 50]); + assert_eq!(members(), vec![10, 30, 40, 50]); // Pot is increased by 1000 again - assert_eq!(Society::pot(), 2_800); + assert_eq!(Pot::::get(), 2_800); // No payouts assert_eq!(Balances::free_balance(Society::account_id()), 8_800); // Candidate 60 now qualifies based on the increased pot size. - assert_eq!(Society::candidates(), vec![create_bid(1900, 60, BidKind::Deposit(25))]); + assert_eq!(candidacies(), vec![(60, candidacy(4, 1900, Deposit(25), 0, 0))]); // Candidate 60 is voted in. 
- assert_ok!(Society::vote(RuntimeOrigin::signed(50), 60, true)); + assert_ok!(Society::vote(Origin::signed(50), 60, true)); + conclude_intake(true, None); run_to_block(20); // 60 joins as a member - assert_eq!(Society::members(), vec![10, 30, 40, 50, 60]); + assert_eq!(members(), vec![10, 30, 40, 50, 60]); // Pay them - assert_eq!(Society::pot(), 1_900); + assert_eq!(Pot::::get(), 1_900); assert_eq!(Balances::free_balance(Society::account_id()), 6_900); }); } @@ -177,16 +312,15 @@ fn unbidding_works() { // Balances are reserved assert_eq!(Balances::free_balance(30), 25); assert_eq!(Balances::reserved_balance(30), 25); - // Must know right position to unbid + cannot unbid someone else - assert_noop!(Society::unbid(RuntimeOrigin::signed(30), 1), Error::::BadPosition); // Can unbid themselves with the right position - assert_ok!(Society::unbid(RuntimeOrigin::signed(30), 0)); + assert_ok!(Society::unbid(Origin::signed(30))); + assert_noop!(Society::unbid(Origin::signed(30)), Error::::NotBidder); // Balance is returned assert_eq!(Balances::free_balance(30), 50); assert_eq!(Balances::reserved_balance(30), 0); // 20 wins candidacy - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(1000, 20, BidKind::Deposit(25))]); + next_intake(); + assert_eq!(candidacies(), vec![(20, candidacy(1, 1000, Deposit(25), 0, 0))]); }); } @@ -195,13 +329,13 @@ fn payout_works() { EnvBuilder::new().execute(|| { // Original balance of 50 assert_eq!(Balances::free_balance(20), 50); - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 1000)); - run_to_block(4); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); - run_to_block(8); + assert_ok!(Society::bid(Origin::signed(20), 1000)); + next_intake(); + assert_ok!(Society::vote(Origin::signed(10), 20, true)); + conclude_intake(true, None); // payout not ready - assert_noop!(Society::payout(RuntimeOrigin::signed(20)), Error::::NoPayout); - run_to_block(9); + assert_noop!(Society::payout(Origin::signed(20)), 
Error::::NoPayout); + next_intake(); // payout should be here assert_ok!(Society::payout(RuntimeOrigin::signed(20))); assert_eq!(Balances::free_balance(20), 1050); @@ -209,22 +343,34 @@ fn payout_works() { } #[test] -fn basic_new_member_skeptic_works() { +fn non_voting_skeptic_is_punished() { EnvBuilder::new().execute(|| { - // NOTE: events are not deposited in the genesis event - System::set_block_number(1); - - assert_eq!(Strikes::::get(10), 0); - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); - run_to_block(8); - assert_eq!(Society::members(), vec![10]); - assert_eq!(Strikes::::get(10), 1); + assert_eq!(Members::::get(10).unwrap().strikes, 0); + assert_ok!(Society::bid(Origin::signed(20), 0)); + next_intake(); + assert_eq!(candidacies(), vec![(20, candidacy(1, 0, Deposit(25), 0, 0))]); + conclude_intake(true, None); + next_intake(); + assert_eq!(members(), vec![10]); + assert_eq!(Members::::get(10).unwrap().strikes, 1); + }); +} - System::assert_last_event(mock::RuntimeEvent::Society(crate::Event::SkepticsChosen { - skeptics: vec![10], - })); +#[test] +fn rejecting_skeptic_on_approved_is_punished() { + EnvBuilder::new().execute(|| { + place_members([20, 30]); + assert_ok!(Society::bid(Origin::signed(40), 0)); + next_intake(); + let skeptic = Skeptic::::get().unwrap(); + for &i in &[10, 20, 30][..] 
{ + assert_ok!(Society::vote(Origin::signed(i), 40, i != skeptic)); + } + conclude_intake(true, None); + assert_eq!(Members::::get(10).unwrap().strikes, 0); + run_to_block(12); + assert_eq!(members(), vec![10, 20, 30, 40]); + assert_eq!(Members::::get(skeptic).unwrap().strikes, 1); }); } @@ -238,16 +384,17 @@ fn basic_new_member_reject_works() { assert_eq!(Balances::free_balance(20), 25); assert_eq!(Balances::reserved_balance(20), 25); // Rotation Period - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); + next_intake(); + assert_eq!(candidacies(), vec![(20, candidacy(1, 0, Deposit(25), 0, 0))]); // We say no - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, false)); - run_to_block(8); + assert_ok!(Society::vote(Origin::signed(10), 20, false)); + conclude_intake(true, None); + next_intake(); // User is not added as member - assert_eq!(Society::members(), vec![10]); - // User is suspended - assert_eq!(Society::candidates(), vec![]); - assert_eq!(Society::suspended_candidate(20).is_some(), true); + assert_eq!(members(), vec![10]); + // User is rejected. 
+ assert_eq!(candidacies(), vec![]); + assert_eq!(Bids::::get().into_inner(), vec![]); }); } @@ -255,20 +402,30 @@ fn basic_new_member_reject_works() { fn slash_payout_works() { EnvBuilder::new().execute(|| { assert_eq!(Balances::free_balance(20), 50); - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 1000)); - run_to_block(4); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); - run_to_block(8); + assert_ok!(Society::bid(Origin::signed(20), 1000)); + next_intake(); + assert_ok!(Society::vote(Origin::signed(10), 20, true)); + conclude_intake(true, None); // payout in queue - assert_eq!(Payouts::::get(20), vec![(9, 1000)]); - assert_noop!(Society::payout(RuntimeOrigin::signed(20)), Error::::NoPayout); + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 0, payouts: vec![(8, 1000)].try_into().unwrap() } + ); + assert_noop!(Society::payout(Origin::signed(20)), Error::::NoPayout); // slash payout assert_eq!(Society::slash_payout(&20, 500), 500); - assert_eq!(Payouts::::get(20), vec![(9, 500)]); - run_to_block(9); + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 0, payouts: vec![(8, 500)].try_into().unwrap() } + ); + run_to_block(8); // payout should be here, but 500 less assert_ok!(Society::payout(RuntimeOrigin::signed(20))); assert_eq!(Balances::free_balance(20), 550); + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 500, payouts: Default::default() } + ); }); } @@ -276,19 +433,32 @@ fn slash_payout_works() { fn slash_payout_multi_works() { EnvBuilder::new().execute(|| { assert_eq!(Balances::free_balance(20), 50); + place_members([20]); // create a few payouts Society::bump_payout(&20, 5, 100); Society::bump_payout(&20, 10, 100); Society::bump_payout(&20, 15, 100); Society::bump_payout(&20, 20, 100); // payouts in queue - assert_eq!(Payouts::::get(20), vec![(5, 100), (10, 100), (15, 100), (20, 100)]); + assert_eq!( + Payouts::::get(20), + PayoutRecord { + paid: 0, + payouts: vec![(5, 100), (10, 100), (15, 100), (20, 
100)].try_into().unwrap() + } + ); // slash payout assert_eq!(Society::slash_payout(&20, 250), 250); - assert_eq!(Payouts::::get(20), vec![(15, 50), (20, 100)]); + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 0, payouts: vec![(15, 50), (20, 100)].try_into().unwrap() } + ); // slash again assert_eq!(Society::slash_payout(&20, 50), 50); - assert_eq!(Payouts::::get(20), vec![(20, 100)]); + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 0, payouts: vec![(20, 100)].try_into().unwrap() } + ); }); } @@ -296,172 +466,226 @@ fn slash_payout_multi_works() { fn suspended_member_life_cycle_works() { EnvBuilder::new().execute(|| { // Add 20 to members, who is not the head and can be suspended/removed. - assert_ok!(Society::add_member(&20)); - assert_eq!(>::get(), vec![10, 20]); - assert_eq!(Strikes::::get(20), 0); - assert_eq!(>::get(20), false); + place_members([20]); + assert_eq!(members(), vec![10, 20]); + assert_eq!(Members::::get(20).unwrap().strikes, 0); + assert!(!SuspendedMembers::::contains_key(20)); // Let's suspend account 20 by giving them 2 strikes by not voting - assert_ok!(Society::bid(RuntimeOrigin::signed(30), 0)); - run_to_block(8); - assert_eq!(Strikes::::get(20), 1); - assert_ok!(Society::bid(RuntimeOrigin::signed(40), 0)); - run_to_block(16); + assert_ok!(Society::bid(Origin::signed(30), 0)); + assert_ok!(Society::bid(Origin::signed(40), 1)); + next_intake(); + conclude_intake(false, None); - // Strike 2 is accumulated, and 20 is suspended :( - assert_eq!(>::get(20), true); - assert_eq!(>::get(), vec![10]); + // 2 strikes are accumulated, and 20 is suspended :( + assert!(SuspendedMembers::::contains_key(20)); + assert_eq!(members(), vec![10]); // Suspended members cannot get payout Society::bump_payout(&20, 10, 100); - assert_noop!(Society::payout(RuntimeOrigin::signed(20)), Error::::NotMember); + assert_noop!(Society::payout(Origin::signed(20)), Error::::NotMember); // Normal people cannot make judgement assert_noop!( - 
Society::judge_suspended_member(RuntimeOrigin::signed(20), 20, true), - BadOrigin + Society::judge_suspended_member(Origin::signed(20), 20, true), + Error::::NotFounder ); // Suspension judgment origin can judge thee // Suspension judgement origin forgives the suspended member - assert_ok!(Society::judge_suspended_member(RuntimeOrigin::signed(2), 20, true)); - assert_eq!(>::get(20), false); - assert_eq!(>::get(), vec![10, 20]); + assert_ok!(Society::judge_suspended_member(Origin::signed(10), 20, true)); + assert!(!SuspendedMembers::::contains_key(20)); + assert_eq!(members(), vec![10, 20]); // Let's suspend them again, directly - Society::suspend_member(&20); - assert_eq!(>::get(20), true); + assert_ok!(Society::suspend_member(&20)); + assert!(SuspendedMembers::::contains_key(20)); // Suspension judgement origin does not forgive the suspended member - assert_ok!(Society::judge_suspended_member(RuntimeOrigin::signed(2), 20, false)); + assert_ok!(Society::judge_suspended_member(Origin::signed(10), 20, false)); // Cleaned up - assert_eq!(>::get(20), false); - assert_eq!(>::get(), vec![10]); - assert_eq!(>::get(20), vec![]); + assert!(!SuspendedMembers::::contains_key(20)); + assert_eq!(members(), vec![10]); + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 0, payouts: vec![].try_into().unwrap() } + ); }); } #[test] fn suspended_candidate_rejected_works() { EnvBuilder::new().execute(|| { - // Starting Balance - assert_eq!(Balances::free_balance(20), 50); - assert_eq!(Balances::free_balance(Society::account_id()), 10000); - // 20 makes a bid - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); - assert_eq!(Balances::free_balance(20), 25); - assert_eq!(Balances::reserved_balance(20), 25); - // Rotation Period - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); - // We say no - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, false)); - run_to_block(8); - // User is not added as member - 
assert_eq!(Society::members(), vec![10]); - // User is suspended - assert_eq!(Society::candidates(), vec![]); - assert_eq!(Society::suspended_candidate(20).is_some(), true); + place_members([20, 30]); + // 40, 50, 60, 70, 80 make bids + for &x in &[40u128, 50, 60, 70] { + assert_ok!(Society::bid(Origin::signed(x), 10)); + assert_eq!(Balances::free_balance(x), 25); + assert_eq!(Balances::reserved_balance(x), 25); + } - // Normal user cannot make judgement on suspended candidate - assert_noop!( - Society::judge_suspended_candidate(RuntimeOrigin::signed(20), 20, Judgement::Approve), - BadOrigin + // Rotation Period + next_intake(); + assert_eq!( + candidacies(), + vec![ + (40, candidacy(1, 10, Deposit(25), 0, 0)), + (50, candidacy(1, 10, Deposit(25), 0, 0)), + (60, candidacy(1, 10, Deposit(25), 0, 0)), + (70, candidacy(1, 10, Deposit(25), 0, 0)), + ] ); - // Suspension judgement origin makes no direct judgement - assert_ok!(Society::judge_suspended_candidate( - RuntimeOrigin::signed(2), - 20, - Judgement::Rebid - )); - // They are placed back in bid pool, repeat suspension process - // Rotation Period - run_to_block(12); - assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); - // We say no - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, false)); - run_to_block(16); - // User is not added as member - assert_eq!(Society::members(), vec![10]); - // User is suspended - assert_eq!(Society::candidates(), vec![]); - assert_eq!(Society::suspended_candidate(20).is_some(), true); - - // Suspension judgement origin rejects the candidate - assert_ok!(Society::judge_suspended_candidate( - RuntimeOrigin::signed(2), - 20, - Judgement::Reject - )); - // User is slashed - assert_eq!(Balances::free_balance(20), 25); - assert_eq!(Balances::reserved_balance(20), 0); - // Funds are deposited to society account - assert_eq!(Balances::free_balance(Society::account_id()), 10025); - // Cleaned up - assert_eq!(Society::candidates(), vec![]); - 
assert_eq!(>::get(20), None); + // Split vote over all. + for &x in &[40, 50, 60, 70] { + assert_ok!(Society::vote(Origin::signed(20), x, false)); + assert_ok!(Society::vote(Origin::signed(30), x, true)); + } + + // Voting continues, as no canidate is clearly accepted yet and the founder chooses not to + // act. + conclude_intake(false, None); + assert_eq!(members(), vec![10, 20, 30]); + assert_eq!(candidates(), vec![40, 50, 60, 70]); + + // 40 gets approved after founder weighs in giving it a clear approval. + // but the founder's rejection of 60 doesn't do much for now. + assert_ok!(Society::vote(Origin::signed(10), 40, true)); + assert_ok!(Society::vote(Origin::signed(10), 60, false)); + conclude_intake(false, None); + assert_eq!(members(), vec![10, 20, 30, 40]); + assert_eq!(candidates(), vec![50, 60, 70]); + assert_eq!(Balances::free_balance(40), 50); + assert_eq!(Balances::reserved_balance(40), 0); + assert_eq!(Balances::free_balance(Society::account_id()), 9990); + + // Founder manually bestows membership on 50 and and kicks 70. + assert_ok!(Society::bestow_membership(Origin::signed(10), 50)); + assert_eq!(members(), vec![10, 20, 30, 40, 50]); + assert_eq!(candidates(), vec![60, 70]); + assert_eq!(Balances::free_balance(50), 50); + assert_eq!(Balances::reserved_balance(50), 0); + assert_eq!(Balances::free_balance(Society::account_id()), 9980); + + assert_eq!(Balances::free_balance(70), 25); + assert_eq!(Balances::reserved_balance(70), 25); + + assert_ok!(Society::kick_candidate(Origin::signed(10), 70)); + assert_eq!(members(), vec![10, 20, 30, 40, 50]); + assert_eq!(candidates(), vec![60]); + assert_eq!(Balances::free_balance(70), 25); + assert_eq!(Balances::reserved_balance(70), 0); + assert_eq!(Balances::free_balance(Society::account_id()), 10005); + + // Next round doesn't make much difference. 
+ next_intake(); + conclude_intake(false, None); + assert_eq!(members(), vec![10, 20, 30, 40, 50]); + assert_eq!(candidates(), vec![60]); + assert_eq!(Balances::free_balance(Society::account_id()), 10005); + + // But after two rounds, the clearly rejected 60 gets dropped and slashed. + next_intake(); + conclude_intake(false, None); + assert_eq!(members(), vec![10, 20, 30, 40, 50]); + assert_eq!(candidates(), vec![]); + assert_eq!(Balances::free_balance(60), 25); + assert_eq!(Balances::reserved_balance(60), 0); + assert_eq!(Balances::free_balance(Society::account_id()), 10030); }); } #[test] -fn vouch_works() { +fn unpaid_vouch_works() { EnvBuilder::new().execute(|| { // 10 is the only member - assert_eq!(Society::members(), vec![10]); + assert_eq!(members(), vec![10]); // A non-member cannot vouch - assert_noop!( - Society::vouch(RuntimeOrigin::signed(1), 20, 1000, 100), - Error::::NotMember - ); + assert_noop!(Society::vouch(Origin::signed(1), 20, 1000, 100), Error::::NotMember); // A member can though - assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 1000, 100)); - assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); + assert_ok!(Society::vouch(Origin::signed(10), 20, 1000, 100)); + assert_eq!(Members::::get(10).unwrap().vouching, Some(VouchingStatus::Vouching)); // A member cannot vouch twice at the same time assert_noop!( - Society::vouch(RuntimeOrigin::signed(10), 30, 100, 0), - Error::::AlreadyVouching + Society::vouch(Origin::signed(10), 30, 100, 0), + Error::::AlreadyVouching ); // Vouching creates the right kind of bid - assert_eq!(>::get(), vec![create_bid(1000, 20, BidKind::Vouch(10, 100))]); + assert_eq!(Bids::::get().into_inner(), vec![bid(20, Vouch(10, 100), 1000)]); // Vouched user can become candidate - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(1000, 20, BidKind::Vouch(10, 100))]); + next_intake(); + assert_eq!(candidacies(), vec![(20, candidacy(1, 1000, Vouch(10, 100), 0, 0))]); // Vote yes 
assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); // Vouched user can win - run_to_block(8); - assert_eq!(Society::members(), vec![10, 20]); + conclude_intake(true, None); + assert_eq!(members(), vec![10, 20]); + // Vouched user gets whatever remains after the voucher's reservation. + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 0, payouts: vec![(8, 900)].try_into().unwrap() } + ); + // 10 is no longer vouching + assert_eq!(Members::::get(10).unwrap().vouching, None); + }); +} + +#[test] +fn paid_vouch_works() { + EnvBuilder::new().execute(|| { + place_members([20]); + assert_eq!(members(), vec![10, 20]); + + assert_ok!(Society::vouch(Origin::signed(20), 30, 1000, 100)); + assert_eq!(Members::::get(20).unwrap().vouching, Some(VouchingStatus::Vouching)); + assert_eq!(Bids::::get().into_inner(), vec![bid(30, Vouch(20, 100), 1000)]); + + next_intake(); + assert_eq!(candidacies(), vec![(30, candidacy(1, 1000, Vouch(20, 100), 0, 0))]); + assert_ok!(Society::vote(Origin::signed(20), 30, true)); + conclude_intake(true, None); + + assert_eq!(members(), vec![10, 20, 30]); // Voucher wins a portion of the payment - assert_eq!(>::get(10), vec![(9, 100)]); + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 0, payouts: vec![(8, 100)].try_into().unwrap() } + ); // Vouched user wins the rest - assert_eq!(>::get(20), vec![(9, 900)]); - // 10 is no longer vouching - assert_eq!(>::get(10), None); + assert_eq!( + Payouts::::get(30), + PayoutRecord { paid: 0, payouts: vec![(8, 900)].try_into().unwrap() } + ); + // 20 is no longer vouching + assert_eq!(Members::::get(20).unwrap().vouching, None); }); } #[test] fn voucher_cannot_win_more_than_bid() { EnvBuilder::new().execute(|| { - // 10 is the only member - assert_eq!(Society::members(), vec![10]); - // 10 vouches, but asks for more than the bid - assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 100, 1000)); + place_members([20]); + // 20 vouches, but asks for more than the bid + 
assert_ok!(Society::vouch(Origin::signed(20), 30, 100, 1000)); // Vouching creates the right kind of bid - assert_eq!(>::get(), vec![create_bid(100, 20, BidKind::Vouch(10, 1000))]); + assert_eq!(Bids::::get().into_inner(), vec![bid(30, Vouch(20, 1000), 100)]); // Vouched user can become candidate - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(100, 20, BidKind::Vouch(10, 1000))]); + next_intake(); + assert_eq!(candidacies(), vec![(30, candidacy(1, 100, Vouch(20, 1000), 0, 0))]); // Vote yes - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); + assert_ok!(Society::vote(Origin::signed(20), 30, true)); // Vouched user can win - run_to_block(8); - assert_eq!(Society::members(), vec![10, 20]); + conclude_intake(true, None); + assert_eq!(members(), vec![10, 20, 30]); // Voucher wins as much as the bid - assert_eq!(>::get(10), vec![(9, 100)]); + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 0, payouts: vec![(8, 100)].try_into().unwrap() } + ); // Vouched user gets nothing - assert_eq!(>::get(20), vec![]); + assert_eq!( + Payouts::::get(30), + PayoutRecord { paid: 0, payouts: vec![].try_into().unwrap() } + ); }); } @@ -469,52 +693,49 @@ fn voucher_cannot_win_more_than_bid() { fn unvouch_works() { EnvBuilder::new().execute(|| { // 10 is the only member - assert_eq!(Society::members(), vec![10]); + assert_eq!(members(), vec![10]); // 10 vouches for 20 assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 100, 0)); // 20 has a bid - assert_eq!(>::get(), vec![create_bid(100, 20, BidKind::Vouch(10, 0))]); + assert_eq!(Bids::::get().into_inner(), vec![bid(20, Vouch(10, 0), 100)]); // 10 is vouched - assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); - // To unvouch, you must know the right bid position - assert_noop!(Society::unvouch(RuntimeOrigin::signed(10), 2), Error::::BadPosition); - // 10 can unvouch with the right position - assert_ok!(Society::unvouch(RuntimeOrigin::signed(10), 0)); + 
assert_eq!(Members::::get(10).unwrap().vouching, Some(VouchingStatus::Vouching)); + // 10 can unvouch + assert_ok!(Society::unvouch(Origin::signed(10))); // 20 no longer has a bid - assert_eq!(>::get(), vec![]); + assert_eq!(Bids::::get().into_inner(), vec![]); // 10 is no longer vouching - assert_eq!(>::get(10), None); + assert_eq!(Members::::get(10).unwrap().vouching, None); // Cannot unvouch after they become candidate - assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 100, 0)); - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(100, 20, BidKind::Vouch(10, 0))]); - assert_noop!(Society::unvouch(RuntimeOrigin::signed(10), 0), Error::::BadPosition); + assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 0)); + next_intake(); + assert_eq!(candidacies(), vec![(20, candidacy(1, 100, Vouch(10, 0), 0, 0))]); + assert_noop!(Society::unvouch(Origin::signed(10)), Error::::NotVouchingOnBidder); + // 10 is still vouching until candidate is approved or rejected - assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); - run_to_block(8); - // In this case candidate is denied and suspended - assert!(Society::suspended_candidate(&20).is_some()); - assert_eq!(Society::members(), vec![10]); - // User is stuck vouching until judgement origin resolves suspended candidate - assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); - // Judge denies candidate - assert_ok!(Society::judge_suspended_candidate( - RuntimeOrigin::signed(2), - 20, - Judgement::Reject - )); - // 10 is banned from vouching - assert_eq!(>::get(10), Some(VouchingStatus::Banned)); - assert_eq!(Society::members(), vec![10]); + assert_eq!(Members::::get(10).unwrap().vouching, Some(VouchingStatus::Vouching)); + // Voucher inexplicably votes against their pick. + assert_ok!(Society::vote(Origin::signed(10), 20, false)); + // But their pick doesn't resign (yet). + conclude_intake(false, None); + // Voting still happening and voucher cannot unvouch. 
+ assert_eq!(candidacies(), vec![(20, candidacy(1, 100, Vouch(10, 0), 0, 1))]); + assert_eq!(Members::::get(10).unwrap().vouching, Some(VouchingStatus::Vouching)); + + // Candidate gives in and resigns. + conclude_intake(true, None); + // Vouxher (10) is banned from vouching. + assert_eq!(Members::::get(10).unwrap().vouching, Some(VouchingStatus::Banned)); + assert_eq!(members(), vec![10]); // 10 cannot vouch again assert_noop!( - Society::vouch(RuntimeOrigin::signed(10), 30, 100, 0), - Error::::AlreadyVouching + Society::vouch(Origin::signed(10), 30, 100, 0), + Error::::AlreadyVouching ); // 10 cannot unvouch either, so they are banned forever. - assert_noop!(Society::unvouch(RuntimeOrigin::signed(10), 0), Error::::NotVouching); + assert_noop!(Society::unvouch(Origin::signed(10)), Error::::NotVouchingOnBidder); }); } @@ -522,18 +743,18 @@ fn unvouch_works() { fn unbid_vouch_works() { EnvBuilder::new().execute(|| { // 10 is the only member - assert_eq!(Society::members(), vec![10]); + assert_eq!(members(), vec![10]); // 10 vouches for 20 assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 100, 0)); // 20 has a bid - assert_eq!(>::get(), vec![create_bid(100, 20, BidKind::Vouch(10, 0))]); + assert_eq!(Bids::::get().into_inner(), vec![bid(20, Vouch(10, 0), 100)]); // 10 is vouched - assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); + assert_eq!(Members::::get(10).unwrap().vouching, Some(VouchingStatus::Vouching)); // 20 doesn't want to be a member and can unbid themselves. 
- assert_ok!(Society::unbid(RuntimeOrigin::signed(20), 0)); + assert_ok!(Society::unbid(Origin::signed(20))); // Everything is cleaned up - assert_eq!(>::get(10), None); - assert_eq!(>::get(), vec![]); + assert_eq!(Members::::get(10).unwrap().vouching, None); + assert_eq!(Bids::::get().into_inner(), vec![]); }); } @@ -541,60 +762,73 @@ fn unbid_vouch_works() { fn founder_and_head_cannot_be_removed() { EnvBuilder::new().execute(|| { // 10 is the only member, founder, and head - assert_eq!(Society::members(), vec![10]); - assert_eq!(Society::founder(), Some(10)); - assert_eq!(Society::head(), Some(10)); + assert_eq!(members(), vec![10]); + assert_eq!(Founder::::get(), Some(10)); + assert_eq!(Head::::get(), Some(10)); // 10 can still accumulate strikes - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); - run_to_block(8); - assert_eq!(Strikes::::get(10), 1); - assert_ok!(Society::bid(RuntimeOrigin::signed(30), 0)); - run_to_block(16); - assert_eq!(Strikes::::get(10), 2); + assert_ok!(Society::bid(Origin::signed(20), 0)); + next_intake(); + conclude_intake(false, None); + assert_eq!(Members::::get(10).unwrap().strikes, 1); + assert_ok!(Society::bid(Origin::signed(30), 0)); + next_intake(); + conclude_intake(false, None); + assert_eq!(Members::::get(10).unwrap().strikes, 2); // Awkwardly they can obtain more than MAX_STRIKES... 
- assert_ok!(Society::bid(RuntimeOrigin::signed(40), 0)); - run_to_block(24); - assert_eq!(Strikes::::get(10), 3); + assert_ok!(Society::bid(Origin::signed(40), 0)); + next_intake(); + conclude_intake(false, None); + assert_eq!(Members::::get(10).unwrap().strikes, 3); // Replace the head - assert_ok!(Society::bid(RuntimeOrigin::signed(50), 0)); - run_to_block(28); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 50, true)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); // Keep defender around - run_to_block(32); - assert_eq!(Society::members(), vec![10, 50]); - assert_eq!(Society::head(), Some(50)); + assert_ok!(Society::bid(Origin::signed(50), 0)); + next_intake(); + assert_ok!(Society::vote(Origin::signed(10), 50, true)); + conclude_intake(false, None); + assert_eq!(members(), vec![10, 50]); + assert_eq!(Head::::get(), Some(10)); + next_intake(); + assert_eq!(Head::::get(), Some(50)); // Founder is unchanged - assert_eq!(Society::founder(), Some(10)); + assert_eq!(Founder::::get(), Some(10)); // 50 can still accumulate strikes - assert_ok!(Society::bid(RuntimeOrigin::signed(60), 0)); - run_to_block(40); - assert_eq!(Strikes::::get(50), 1); - assert_ok!(Society::bid(RuntimeOrigin::signed(70), 0)); - run_to_block(48); - assert_eq!(Strikes::::get(50), 2); + assert_ok!(Society::bid(Origin::signed(60), 0)); + next_intake(); + // Force 50 to be Skeptic so it gets a strike. + Skeptic::::put(50); + conclude_intake(false, None); + assert_eq!(Members::::get(50).unwrap().strikes, 1); + assert_ok!(Society::bid(Origin::signed(70), 0)); + next_intake(); + // Force 50 to be Skeptic so it gets a strike. 
+ Skeptic::::put(50); + conclude_intake(false, None); + assert_eq!(Members::::get(50).unwrap().strikes, 2); // Replace the head - assert_ok!(Society::bid(RuntimeOrigin::signed(80), 0)); - run_to_block(52); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 80, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(50), 80, true)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); // Keep defender around - run_to_block(56); - assert_eq!(Society::members(), vec![10, 50, 80]); - assert_eq!(Society::head(), Some(80)); - assert_eq!(Society::founder(), Some(10)); + assert_ok!(Society::bid(Origin::signed(80), 0)); + next_intake(); + assert_ok!(Society::vote(Origin::signed(10), 80, true)); + assert_ok!(Society::vote(Origin::signed(50), 80, true)); + conclude_intake(false, None); + next_intake(); + assert_eq!(members(), vec![10, 50, 80]); + assert_eq!(Head::::get(), Some(80)); + assert_eq!(Founder::::get(), Some(10)); // 50 can now be suspended for strikes - assert_ok!(Society::bid(RuntimeOrigin::signed(90), 0)); - run_to_block(60); - // The candidate is rejected, so voting approve will give a strike - assert_ok!(Society::vote(RuntimeOrigin::signed(50), 90, true)); - run_to_block(64); - assert_eq!(Strikes::::get(50), 0); - assert_eq!(>::get(50), true); - assert_eq!(Society::members(), vec![10, 80]); + assert_ok!(Society::bid(Origin::signed(90), 0)); + next_intake(); + // Force 50 to be Skeptic and get it a strike. 
+ Skeptic::::put(50); + conclude_intake(false, None); + next_intake(); + assert_eq!( + SuspendedMembers::::get(50), + Some(MemberRecord { rank: 0, strikes: 3, vouching: None, index: 1 }) + ); + assert_eq!(members(), vec![10, 80]); }); } @@ -602,63 +836,72 @@ fn founder_and_head_cannot_be_removed() { fn challenges_work() { EnvBuilder::new().execute(|| { // Add some members - assert_ok!(Society::add_member(&20)); - assert_ok!(Society::add_member(&30)); - assert_ok!(Society::add_member(&40)); + place_members([20, 30, 40]); // Votes are empty - assert_eq!(>::get(10), None); - assert_eq!(>::get(20), None); - assert_eq!(>::get(30), None); - assert_eq!(>::get(40), None); + assert_eq!(DefenderVotes::::get(0, 10), None); + assert_eq!(DefenderVotes::::get(0, 20), None); + assert_eq!(DefenderVotes::::get(0, 30), None); + assert_eq!(DefenderVotes::::get(0, 40), None); // Check starting point - assert_eq!(Society::members(), vec![10, 20, 30, 40]); - assert_eq!(Society::defender(), None); - // 20 will be challenged during the challenge rotation - run_to_block(8); - assert_eq!(Society::defender(), Some(30)); + assert_eq!(members(), vec![10, 20, 30, 40]); + assert_eq!(Defending::::get(), None); + + // 30 will be challenged during the challenge rotation + next_challenge(); + assert_eq!(Defending::::get().unwrap().0, 30); // They can always free vote for themselves - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(30), true)); + assert_ok!(Society::defender_vote(Origin::signed(30), true)); + // If no one else votes, nothing happens - run_to_block(16); - assert_eq!(Society::members(), vec![10, 20, 30, 40]); + next_challenge(); + assert_eq!(members(), vec![10, 20, 30, 40]); + // Reset votes for last challenge + assert_ok!(Society::cleanup_challenge(Origin::signed(0), 0, 10)); // New challenge period - assert_eq!(Society::defender(), Some(30)); - // Non-member cannot challenge - assert_noop!( - Society::defender_vote(RuntimeOrigin::signed(1), true), - Error::::NotMember - ); + 
assert_eq!(Defending::::get().unwrap().0, 30); + // Non-member cannot vote + assert_noop!(Society::defender_vote(Origin::signed(1), true), Error::::NotMember); // 3 people say accept, 1 reject - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(20), true)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(30), true)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(40), false)); - run_to_block(24); - // 20 survives - assert_eq!(Society::members(), vec![10, 20, 30, 40]); + assert_ok!(Society::defender_vote(Origin::signed(10), true)); + assert_ok!(Society::defender_vote(Origin::signed(20), true)); + assert_ok!(Society::defender_vote(Origin::signed(30), true)); + assert_ok!(Society::defender_vote(Origin::signed(40), false)); + + next_challenge(); + // 30 survives + assert_eq!(members(), vec![10, 20, 30, 40]); + // Reset votes for last challenge + assert_ok!(Society::cleanup_challenge(Origin::signed(0), 1, 10)); // Votes are reset - assert_eq!(>::get(10), None); - assert_eq!(>::get(20), None); - assert_eq!(>::get(30), None); - assert_eq!(>::get(40), None); + assert_eq!(DefenderVotes::::get(0, 10), None); + assert_eq!(DefenderVotes::::get(0, 20), None); + assert_eq!(DefenderVotes::::get(0, 30), None); + assert_eq!(DefenderVotes::::get(0, 40), None); + // One more time - assert_eq!(Society::defender(), Some(30)); + assert_eq!(Defending::::get().unwrap().0, 30); // 2 people say accept, 2 reject - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(20), true)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(30), false)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(40), false)); - run_to_block(32); - // 20 is suspended - assert_eq!(Society::members(), vec![10, 20, 40]); - assert_eq!(Society::suspended_member(30), true); + assert_ok!(Society::defender_vote(Origin::signed(10), 
true)); + assert_ok!(Society::defender_vote(Origin::signed(20), true)); + assert_ok!(Society::defender_vote(Origin::signed(30), false)); + assert_ok!(Society::defender_vote(Origin::signed(40), false)); + + next_challenge(); + // 30 is suspended + assert_eq!(members(), vec![10, 20, 40]); + assert_eq!( + SuspendedMembers::::get(30), + Some(MemberRecord { rank: 0, strikes: 0, vouching: None, index: 2 }) + ); + // Reset votes for last challenge + assert_ok!(Society::cleanup_challenge(Origin::signed(0), 2, 10)); // New defender is chosen - assert_eq!(Society::defender(), Some(20)); + assert_eq!(Defending::::get().unwrap().0, 20); // Votes are reset - assert_eq!(>::get(10), None); - assert_eq!(>::get(20), None); - assert_eq!(>::get(30), None); - assert_eq!(>::get(40), None); + assert_eq!(DefenderVotes::::get(0, 10), None); + assert_eq!(DefenderVotes::::get(0, 20), None); + assert_eq!(DefenderVotes::::get(0, 30), None); + assert_eq!(DefenderVotes::::get(0, 40), None); }); } @@ -666,38 +909,62 @@ fn challenges_work() { fn bad_vote_slash_works() { EnvBuilder::new().execute(|| { // Add some members - assert_ok!(Society::add_member(&20)); - assert_ok!(Society::add_member(&30)); - assert_ok!(Society::add_member(&40)); + place_members([20, 30, 40, 50]); + assert_eq!(members(), vec![10, 20, 30, 40, 50]); // Create some payouts - Society::bump_payout(&10, 5, 100); Society::bump_payout(&20, 5, 100); Society::bump_payout(&30, 5, 100); Society::bump_payout(&40, 5, 100); + Society::bump_payout(&50, 5, 100); // Check starting point - assert_eq!(Society::members(), vec![10, 20, 30, 40]); - assert_eq!(>::get(10), vec![(5, 100)]); - assert_eq!(>::get(20), vec![(5, 100)]); - assert_eq!(>::get(30), vec![(5, 100)]); - assert_eq!(>::get(40), vec![(5, 100)]); + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 0, payouts: vec![(5, 100)].try_into().unwrap() } + ); + assert_eq!( + Payouts::::get(30), + PayoutRecord { paid: 0, payouts: vec![(5, 100)].try_into().unwrap() } + ); + 
assert_eq!( + Payouts::::get(40), + PayoutRecord { paid: 0, payouts: vec![(5, 100)].try_into().unwrap() } + ); + assert_eq!( + Payouts::::get(50), + PayoutRecord { paid: 0, payouts: vec![(5, 100)].try_into().unwrap() } + ); // Create a new bid - assert_ok!(Society::bid(RuntimeOrigin::signed(50), 1000)); - run_to_block(4); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 50, false)); - assert_ok!(Society::vote(RuntimeOrigin::signed(20), 50, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(30), 50, false)); - assert_ok!(Society::vote(RuntimeOrigin::signed(40), 50, false)); - run_to_block(8); + assert_ok!(Society::bid(Origin::signed(60), 1000)); + next_intake(); + // Force 20 to be the skeptic, and make it vote against the settled majority. + Skeptic::::put(20); + assert_ok!(Society::vote(Origin::signed(20), 60, true)); + assert_ok!(Society::vote(Origin::signed(30), 60, false)); + assert_ok!(Society::vote(Origin::signed(40), 60, false)); + assert_ok!(Society::vote(Origin::signed(50), 60, false)); + conclude_intake(false, None); // Wrong voter gained a strike - assert_eq!(>::get(10), 0); - assert_eq!(>::get(20), 1); - assert_eq!(>::get(30), 0); - assert_eq!(>::get(40), 0); + assert_eq!(Members::::get(20).unwrap().strikes, 1); + assert_eq!(Members::::get(30).unwrap().strikes, 0); + assert_eq!(Members::::get(40).unwrap().strikes, 0); + assert_eq!(Members::::get(50).unwrap().strikes, 0); // Their payout is slashed, a random person is rewarded - assert_eq!(>::get(10), vec![(5, 100), (9, 2)]); - assert_eq!(>::get(20), vec![(5, 98)]); - assert_eq!(>::get(30), vec![(5, 100)]); - assert_eq!(>::get(40), vec![(5, 100)]); + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 0, payouts: vec![(5, 50)].try_into().unwrap() } + ); + assert_eq!( + Payouts::::get(30), + PayoutRecord { paid: 0, payouts: vec![(5, 100)].try_into().unwrap() } + ); + assert_eq!( + Payouts::::get(40), + PayoutRecord { paid: 0, payouts: vec![(5, 100)].try_into().unwrap() } + ); + assert_eq!( 
+ Payouts::::get(50), + PayoutRecord { paid: 0, payouts: vec![(5, 100)].try_into().unwrap() } + ); }); } @@ -705,17 +972,14 @@ fn bad_vote_slash_works() { fn user_cannot_bid_twice() { EnvBuilder::new().execute(|| { // Cannot bid twice - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 100)); - assert_noop!(Society::bid(RuntimeOrigin::signed(20), 100), Error::::AlreadyBid); + assert_ok!(Society::bid(Origin::signed(20), 100)); + assert_noop!(Society::bid(Origin::signed(20), 100), Error::::AlreadyBid); // Cannot bid when vouched - assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 30, 100, 100)); - assert_noop!(Society::bid(RuntimeOrigin::signed(30), 100), Error::::AlreadyBid); + assert_ok!(Society::vouch(Origin::signed(10), 30, 100, 100)); + assert_noop!(Society::bid(Origin::signed(30), 100), Error::::AlreadyBid); // Cannot vouch when already bid - assert_ok!(Society::add_member(&50)); - assert_noop!( - Society::vouch(RuntimeOrigin::signed(50), 20, 100, 100), - Error::::AlreadyBid - ); + place_members([50]); + assert_noop!(Society::vouch(Origin::signed(50), 20, 100, 100), Error::::AlreadyBid); }); } @@ -723,23 +987,19 @@ fn user_cannot_bid_twice() { fn vouching_handles_removed_member_with_bid() { EnvBuilder::new().execute(|| { // Add a member - assert_ok!(Society::add_member(&20)); + place_members([20]); // Have that member vouch for a user assert_ok!(Society::vouch(RuntimeOrigin::signed(20), 30, 1000, 100)); // That user is now a bid and the member is vouching - assert_eq!(>::get(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); - assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); + assert_eq!(Bids::::get().into_inner(), vec![bid(30, Vouch(20, 100), 1000)]); + assert_eq!(Members::::get(20).unwrap().vouching, Some(VouchingStatus::Vouching)); // Suspend that member - Society::suspend_member(&20); - assert_eq!(>::get(20), true); - // Nothing changes yet - assert_eq!(>::get(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); - assert_eq!(>::get(20), 
Some(VouchingStatus::Vouching)); - // Remove member - assert_ok!(Society::judge_suspended_member(RuntimeOrigin::signed(2), 20, false)); + assert_ok!(Society::suspend_member(&20)); // Bid is removed, vouching status is removed - assert_eq!(>::get(), vec![]); - assert_eq!(>::get(20), None); + let r = MemberRecord { rank: 0, strikes: 0, vouching: None, index: 1 }; + assert_eq!(SuspendedMembers::::get(20), Some(r)); + assert_eq!(Bids::::get().into_inner(), vec![]); + assert_eq!(Members::::get(20), None); }); } @@ -747,127 +1007,153 @@ fn vouching_handles_removed_member_with_bid() { fn vouching_handles_removed_member_with_candidate() { EnvBuilder::new().execute(|| { // Add a member - assert_ok!(Society::add_member(&20)); + place_members([20]); // Have that member vouch for a user assert_ok!(Society::vouch(RuntimeOrigin::signed(20), 30, 1000, 100)); // That user is now a bid and the member is vouching - assert_eq!(>::get(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); - assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); + assert_eq!(Bids::::get().into_inner(), vec![bid(30, Vouch(20, 100), 1000)]); + assert_eq!(Members::::get(20).unwrap().vouching, Some(VouchingStatus::Vouching)); + // Make that bid a candidate - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); + next_intake(); + assert_eq!(candidacies(), vec![(30, candidacy(1, 1000, Vouch(20, 100), 0, 0))]); // Suspend that member - Society::suspend_member(&20); - assert_eq!(>::get(20), true); - // Nothing changes yet - assert_eq!(Society::candidates(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); - assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); - // Remove member - assert_ok!(Society::judge_suspended_member(RuntimeOrigin::signed(2), 20, false)); - // Vouching status is removed, but candidate is still in the queue - assert_eq!(>::get(20), None); - assert_eq!(Society::candidates(), vec![create_bid(1000, 30, BidKind::Vouch(20, 
100))]); + assert_ok!(Society::suspend_member(&20)); + assert_eq!(SuspendedMembers::::contains_key(20), true); + + // Nothing changes yet in the candidacy, though the member now forgets. + assert_eq!(candidacies(), vec![(30, candidacy(1, 1000, Vouch(20, 100), 0, 0))]); + // Candidate wins - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 30, true)); - run_to_block(8); - assert_eq!(Society::members(), vec![10, 30]); + assert_ok!(Society::vote(Origin::signed(10), 30, true)); + conclude_intake(false, None); + assert_eq!(members(), vec![10, 30]); // Payout does not go to removed member - assert_eq!(>::get(20), vec![]); - assert_eq!(>::get(30), vec![(9, 1000)]); + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 0, payouts: vec![].try_into().unwrap() } + ); + assert_eq!( + Payouts::::get(30), + PayoutRecord { paid: 0, payouts: vec![(8, 1000)].try_into().unwrap() } + ); }); } #[test] fn votes_are_working() { EnvBuilder::new().execute(|| { + place_members([20]); // Users make bids of various amounts assert_ok!(Society::bid(RuntimeOrigin::signed(50), 500)); assert_ok!(Society::bid(RuntimeOrigin::signed(40), 400)); assert_ok!(Society::bid(RuntimeOrigin::signed(30), 300)); // Rotate period - run_to_block(4); + next_intake(); // A member votes for these candidates to join the society - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 30, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 40, true)); + assert_ok!(Society::vote(Origin::signed(10), 30, true)); + assert_ok!(Society::vote(Origin::signed(20), 30, true)); + assert_ok!(Society::vote(Origin::signed(10), 40, true)); // You cannot vote for a non-candidate - assert_noop!( - Society::vote(RuntimeOrigin::signed(10), 50, true), - Error::::NotCandidate - ); + assert_noop!(Society::vote(Origin::signed(10), 50, true), Error::::NotCandidate); // Votes are stored - assert_eq!(>::get(30, 10), Some(Vote::Approve)); - assert_eq!(>::get(40, 10), Some(Vote::Approve)); - assert_eq!(>::get(50, 10), None); - 
run_to_block(8); + assert_eq!(Votes::::get(30, 10), Some(Vote { approve: true, weight: 4 })); + assert_eq!(Votes::::get(30, 20), Some(Vote { approve: true, weight: 1 })); + assert_eq!(Votes::::get(40, 10), Some(Vote { approve: true, weight: 4 })); + assert_eq!(Votes::::get(50, 10), None); + conclude_intake(false, None); + // Cleanup the candidacy + assert_ok!(Society::cleanup_candidacy(Origin::signed(0), 30, 10)); + assert_ok!(Society::cleanup_candidacy(Origin::signed(0), 40, 10)); // Candidates become members after a period rotation - assert_eq!(Society::members(), vec![10, 30, 40]); + assert_eq!(members(), vec![10, 20, 30, 40]); // Votes are cleaned up - assert_eq!(>::get(30, 10), None); - assert_eq!(>::get(40, 10), None); + assert_eq!(Votes::::get(30, 10), None); + assert_eq!(Votes::::get(30, 20), None); + assert_eq!(Votes::::get(40, 10), None); }); } #[test] -fn max_limits_work() { - EnvBuilder::new().with_pot(100000).execute(|| { +fn max_bids_work() { + EnvBuilder::new().execute(|| { // Max bids is 1000, when extra bids come in, it pops the larger ones off the stack. 
// Try to put 1010 users into the bid pool - for i in (100..1110).rev() { - // Give them some funds - let _ = Balances::make_free_balance_be(&(i as u128), 1000); - assert_ok!(Society::bid(RuntimeOrigin::signed(i as u128), i)); + for i in (0..=10).rev() { + // Give them some funds and bid + let _ = Balances::make_free_balance_be(&((i + 100) as u128), 1000); + assert_ok!(Society::bid(Origin::signed((i + 100) as u128), i)); } - let bids = >::get(); + let bids = Bids::::get(); // Length is 1000 - assert_eq!(bids.len(), 1000); + assert_eq!(bids.len(), 10); // First bid is smallest number (100) - assert_eq!(bids[0], create_bid(100, 100, BidKind::Deposit(25))); + assert_eq!(bids[0], bid(100, Deposit(25), 0)); // Last bid is smallest number + 99 (1099) - assert_eq!(bids[999], create_bid(1099, 1099, BidKind::Deposit(25))); - // Rotate period - run_to_block(4); - // Max of 10 candidates - assert_eq!(Society::candidates().len(), 10); - // Fill up membership, max 100, we will do just 95 - for i in 2000..2095 { - assert_ok!(Society::add_member(&(i as u128))); - } - // Remember there was 1 original member, so 96 total - assert_eq!(Society::members().len(), 96); - // Rotate period - run_to_block(8); - // Only of 4 candidates possible now - assert_eq!(Society::candidates().len(), 4); - // Fill up members with suspended candidates from the first rotation - for i in 100..104 { - assert_ok!(Society::judge_suspended_candidate( - RuntimeOrigin::signed(2), - i, - Judgement::Approve - )); - } - assert_eq!(Society::members().len(), 100); - // Can't add any more members - assert_noop!(Society::add_member(&98), Error::::MaxMembers); - // However, a fringe scenario allows for in-progress candidates to increase the membership - // pool, but it has no real after-effects. 
- for i in Society::members().iter() { - assert_ok!(Society::vote(RuntimeOrigin::signed(*i), 110, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(*i), 111, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(*i), 112, true)); + assert_eq!(bids[9], bid(109, Deposit(25), 9)); + }); +} + +#[test] +fn candidates_are_limited_by_membership_size() { + EnvBuilder::new().execute(|| { + // Fill up some membership + place_members([1, 2, 3, 4, 5, 6, 7, 8]); + // One place left from 10 + assert_eq!(members().len(), 9); + + assert_ok!(Society::bid(Origin::signed(20), 0)); + assert_ok!(Society::bid(Origin::signed(30), 1)); + next_intake(); + assert_eq!(candidates().len(), 1); + }); +} + +#[test] +fn candidates_are_limited_by_maximum() { + EnvBuilder::new().execute(|| { + // Nine places left from 10 + assert_eq!(members().len(), 1); + + // Nine bids + for i in (1..=9).rev() { + // Give them some funds and bid + let _ = Balances::make_free_balance_be(&((i + 100) as u128), 1000); + assert_ok!(Society::bid(Origin::signed((i + 100) as u128), i)); } - // Rotate period - run_to_block(12); - // Members length is over 100, no problem... - assert_eq!(Society::members().len(), 103); - // No candidates because full - assert_eq!(Society::candidates().len(), 0); - // Increase member limit - assert_ok!(Society::set_max_members(RuntimeOrigin::root(), 200)); - // Rotate period - run_to_block(16); - // Candidates are back! - assert_eq!(Society::candidates().len(), 10); + next_intake(); + + // Still only 8 candidates. + assert_eq!(candidates().len(), 8); + }); +} + +#[test] +fn too_many_candidates_cannot_overflow_membership() { + EnvBuilder::new().execute(|| { + // One place left + place_members([1, 2, 3, 4, 5, 6, 7, 8]); + assert_ok!(Society::bid(Origin::signed(20), 0)); + assert_ok!(Society::bid(Origin::signed(30), 1)); + next_intake(); + // Candidate says a candidate. + next_intake(); + // Another candidate taken. + // Both approved. 
+ assert_ok!(Society::vote(Origin::signed(10), 20, true)); + assert_ok!(Society::vote(Origin::signed(10), 30, true)); + next_voting(); + assert_ok!(Society::claim_membership(Origin::signed(20))); + assert_noop!(Society::claim_membership(Origin::signed(30)), Error::::MaxMembers); + + // Maximum members. + assert_eq!(members().len(), 10); + // Still 1 candidate. + assert_eq!(candidates().len(), 1); + + // Increase max-members and the candidate can get in. + assert_ok!(Society::set_parameters(Origin::signed(10), 11, 8, 3, 25)); + assert_ok!(Society::claim_membership(Origin::signed(30))); }); } @@ -885,32 +1171,30 @@ fn zero_bid_works() { assert_ok!(Society::bid(RuntimeOrigin::signed(40), 0)); // Rotate period - run_to_block(4); + next_intake(); // Pot is 1000 after "PeriodSpend" - assert_eq!(Society::pot(), 1000); + assert_eq!(Pot::::get(), 1000); assert_eq!(Balances::free_balance(Society::account_id()), 10_000); // Choose smallest bidding users whose total is less than pot, with only one zero bid. 
assert_eq!( - Society::candidates(), + candidacies(), vec![ - create_bid(0, 30, BidKind::Deposit(25)), - create_bid(300, 50, BidKind::Deposit(25)), - create_bid(400, 60, BidKind::Deposit(25)), + (30, candidacy(1, 0, Deposit(25), 0, 0)), + (50, candidacy(1, 300, Deposit(25), 0, 0)), + (60, candidacy(1, 400, Deposit(25), 0, 0)), ] ); - assert_eq!( - >::get(), - vec![create_bid(0, 20, BidKind::Deposit(25)), create_bid(0, 40, BidKind::Deposit(25)),] - ); + assert_eq!(Bids::::get(), vec![bid(20, Deposit(25), 0), bid(40, Deposit(25), 0),],); // A member votes for these candidates to join the society - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 30, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 50, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 60, true)); - run_to_block(8); + assert_ok!(Society::vote(Origin::signed(10), 30, true)); + assert_ok!(Society::vote(Origin::signed(10), 50, true)); + assert_ok!(Society::vote(Origin::signed(10), 60, true)); + conclude_intake(false, None); // Candidates become members after a period rotation - assert_eq!(Society::members(), vec![10, 30, 50, 60]); + assert_eq!(members(), vec![10, 30, 50, 60]); + next_intake(); // The zero bid is selected as head - assert_eq!(Society::head(), Some(30)); + assert_eq!(Head::::get(), Some(30)); }); } @@ -922,8 +1206,9 @@ fn bids_ordered_correctly() { for i in 0..5 { for j in 0..5 { // Give them some funds - let _ = Balances::make_free_balance_be(&(100 + (i * 5 + j) as u128), 1000); - assert_ok!(Society::bid(RuntimeOrigin::signed(100 + (i * 5 + j) as u128), j)); + let who = 100 + (i * 5 + j) as u128; + let _ = Balances::make_free_balance_be(&who, 1000); + assert_ok!(Society::bid(Origin::signed(who), j)); } } @@ -931,10 +1216,79 @@ fn bids_ordered_correctly() { for j in 0..5 { for i in 0..5 { - final_list.push(create_bid(j, 100 + (i * 5 + j) as u128, BidKind::Deposit(25))); + final_list.push(bid(100 + (i * 5 + j) as u128, Deposit(25), j)); } } + let max_bids: u32 = 
::MaxBids::get(); + final_list.truncate(max_bids as usize); + assert_eq!(Bids::::get(), final_list); + }); +} + +#[test] +fn waive_repay_works() { + EnvBuilder::new().execute(|| { + place_members([20, 30]); + Society::bump_payout(&20, 5, 100); + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 0, payouts: vec![(5, 100)].try_into().unwrap() } + ); + assert_eq!(Members::::get(20).unwrap().rank, 0); + assert_ok!(Society::waive_repay(Origin::signed(20), 100)); + assert_eq!( + Payouts::::get(20), + PayoutRecord { paid: 0, payouts: vec![].try_into().unwrap() } + ); + assert_eq!(Members::::get(10).unwrap().rank, 1); + assert_eq!(Balances::free_balance(20), 50); + }); +} + +#[test] +fn punish_skeptic_works() { + EnvBuilder::new().execute(|| { + place_members([20]); + assert_ok!(Society::bid(Origin::signed(30), 0)); + next_intake(); + // Force 20 to be Skeptic so it gets a strike. + Skeptic::::put(20); + next_voting(); + // 30 decides to punish the skeptic (20). + assert_ok!(Society::punish_skeptic(Origin::signed(30))); + // 20 gets 1 strike. + assert_eq!(Members::::get(20).unwrap().strikes, 1); + let candidacy = Candidates::::get(&30).unwrap(); + // 30 candidacy has changed. + assert_eq!(candidacy.skeptic_struck, true); + }); +} - assert_eq!(>::get(), final_list); +#[test] +fn resign_candidacy_works() { + EnvBuilder::new().execute(|| { + assert_ok!(Society::bid(Origin::signed(30), 45)); + next_intake(); + assert_eq!(candidates(), vec![30]); + assert_ok!(Society::resign_candidacy(Origin::signed(30))); + // 30 candidacy has gone. 
+ assert_eq!(candidates(), vec![]); + }); +} + +#[test] +fn drop_candidate_works() { + EnvBuilder::new().execute(|| { + place_members([20, 30]); + assert_ok!(Society::bid(Origin::signed(40), 45)); + next_intake(); + assert_eq!(candidates(), vec![40]); + assert_ok!(Society::vote(Origin::signed(10), 40, false)); + assert_ok!(Society::vote(Origin::signed(20), 40, false)); + assert_ok!(Society::vote(Origin::signed(30), 40, false)); + run_to_block(12); + assert_ok!(Society::drop_candidate(Origin::signed(50), 40)); + // 40 candidacy has gone. + assert_eq!(candidates(), vec![]); }); } diff --git a/frame/society/src/weights.rs b/frame/society/src/weights.rs new file mode 100644 index 0000000000000..d113f617c886c --- /dev/null +++ b/frame/society/src/weights.rs @@ -0,0 +1,375 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_society +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2022-09-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/production/substrate +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_society +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --template=./.maintain/frame-weight-template.hbs +// --header=./HEADER-APACHE2 +// --output=./frame/society/src/weights.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_society. +pub trait WeightInfo { + fn bid() -> Weight; + fn unbid() -> Weight; + fn vouch() -> Weight; + fn unvouch() -> Weight; + fn vote() -> Weight; + fn defender_vote() -> Weight; + fn payout() -> Weight; + fn waive_repay() -> Weight; + fn found_society() -> Weight; + fn dissolve() -> Weight; + fn judge_suspended_member() -> Weight; + fn set_parameters() -> Weight; + fn punish_skeptic() -> Weight; + fn claim_membership() -> Weight; + fn bestow_membership() -> Weight; + fn kick_candidate() -> Weight; + fn resign_candidacy() -> Weight; + fn drop_candidate() -> Weight; + fn cleanup_candidacy() -> Weight; + fn cleanup_challenge() -> Weight; +} + +/// Weights for pallet_society using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Society Bids (r:1 w:1) + // Storage: Society Candidates (r:1 w:0) + // Storage: Society Members (r:1 w:0) + // Storage: Society SuspendedMembers (r:1 w:0) + // Storage: Society Parameters (r:1 w:0) + fn bid() -> Weight { + Weight::zero() + } + // Storage: Society Bids (r:1 w:1) + fn unbid() -> Weight { + Weight::zero() + } + // Storage: Society Bids (r:1 w:1) + // Storage: Society Candidates (r:1 w:0) + // Storage: Society Members (r:2 w:1) + // Storage: Society SuspendedMembers (r:1 w:0) + fn vouch() -> Weight { + Weight::zero() + } + // Storage: Society Bids (r:1 w:1) + // Storage: Society Members (r:1 w:1) + fn unvouch() -> Weight { + Weight::zero() + } + // Storage: Society Candidates (r:1 w:1) + // Storage: Society Members (r:1 w:0) + // Storage: Society Votes (r:1 w:1) + fn vote() -> Weight { + Weight::zero() + } + // Storage: Society Defending (r:1 w:1) + // Storage: Society Members (r:1 w:0) + // Storage: Society ChallengeRoundCount (r:1 w:0) + // Storage: Society DefenderVotes (r:1 w:1) + fn defender_vote() -> Weight { + Weight::zero() + } + // Storage: Society Members (r:1 w:0) + // Storage: Society Payouts (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn payout() -> Weight { + Weight::zero() + } + // Storage: Society Members (r:1 w:1) + // Storage: Society Payouts (r:1 w:1) + fn waive_repay() -> Weight { + Weight::zero() + } + // Storage: Society Head (r:1 w:1) + // Storage: Society MemberCount (r:1 w:1) + // Storage: Society MemberByIndex (r:0 w:1) + // Storage: Society Founder (r:0 w:1) + // Storage: Society Rules (r:0 w:1) + // Storage: Society Members (r:0 w:1) + // Storage: Society Parameters (r:0 w:1) + fn found_society() -> Weight { + Weight::zero() + } + // Storage: Society Founder (r:1 w:1) + // Storage: Society MemberCount (r:1 w:1) + // Storage: Society Head (r:0 w:1) + // Storage: Society Defending (r:0 w:1) + // Storage: Society 
ChallengeRoundCount (r:0 w:1) + // Storage: Society MemberByIndex (r:0 w:5) + // Storage: Society Skeptic (r:0 w:1) + // Storage: Society Candidates (r:0 w:4) + // Storage: Society Pot (r:0 w:1) + // Storage: Society Rules (r:0 w:1) + // Storage: Society Votes (r:0 w:4) + // Storage: Society Members (r:0 w:5) + // Storage: Society RoundCount (r:0 w:1) + // Storage: Society Bids (r:0 w:1) + // Storage: Society Parameters (r:0 w:1) + // Storage: Society NextHead (r:0 w:1) + fn dissolve() -> Weight { + Weight::zero() + } + // Storage: Society Founder (r:1 w:0) + // Storage: Society SuspendedMembers (r:1 w:1) + // Storage: Society Payouts (r:1 w:0) + // Storage: Society Pot (r:1 w:1) + fn judge_suspended_member() -> Weight { + Weight::zero() + } + // Storage: Society Founder (r:1 w:0) + // Storage: Society MemberCount (r:1 w:0) + // Storage: Society Parameters (r:0 w:1) + fn set_parameters() -> Weight { + Weight::zero() + } + // Storage: Society Candidates (r:1 w:1) + // Storage: Society RoundCount (r:1 w:0) + // Storage: Society Skeptic (r:1 w:0) + // Storage: Society Votes (r:1 w:0) + // Storage: Society Members (r:1 w:1) + // Storage: Society Parameters (r:1 w:0) + fn punish_skeptic() -> Weight { + Weight::zero() + } + // Storage: Society Candidates (r:1 w:1) + // Storage: Society RoundCount (r:1 w:0) + // Storage: Society Parameters (r:1 w:0) + // Storage: Society MemberCount (r:1 w:1) + // Storage: Society NextHead (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Society MemberByIndex (r:0 w:1) + // Storage: Society Members (r:0 w:1) + fn claim_membership() -> Weight { + Weight::zero() + } + // Storage: Society Founder (r:1 w:0) + // Storage: Society Candidates (r:1 w:1) + // Storage: Society RoundCount (r:1 w:0) + // Storage: Society Parameters (r:1 w:0) + // Storage: Society MemberCount (r:1 w:1) + // Storage: Society NextHead (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Society MemberByIndex (r:0 w:1) + // Storage: Society 
Members (r:0 w:1) + fn bestow_membership() -> Weight { + Weight::zero() + } + // Storage: Society Founder (r:1 w:0) + // Storage: Society Candidates (r:1 w:1) + // Storage: Society RoundCount (r:1 w:0) + fn kick_candidate() -> Weight { + Weight::zero() + } + // Storage: Society Candidates (r:1 w:1) + // Storage: Society RoundCount (r:1 w:0) + fn resign_candidacy() -> Weight { + Weight::zero() + } + // Storage: Society Candidates (r:1 w:1) + // Storage: Society RoundCount (r:1 w:0) + fn drop_candidate() -> Weight { + Weight::zero() + } + // Storage: Society Candidates (r:1 w:0) + // Storage: Society VoteClearCursor (r:1 w:0) + // Storage: Society Votes (r:0 w:2) + fn cleanup_candidacy() -> Weight { + Weight::zero() + } + // Storage: Society ChallengeRoundCount (r:1 w:0) + // Storage: Society DefenderVotes (r:0 w:1) + fn cleanup_challenge() -> Weight { + Weight::zero() + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Society Bids (r:1 w:1) + // Storage: Society Candidates (r:1 w:0) + // Storage: Society Members (r:1 w:0) + // Storage: Society SuspendedMembers (r:1 w:0) + // Storage: Society Parameters (r:1 w:0) + fn bid() -> Weight { + Weight::zero() + } + // Storage: Society Bids (r:1 w:1) + fn unbid() -> Weight { + Weight::zero() + } + // Storage: Society Bids (r:1 w:1) + // Storage: Society Candidates (r:1 w:0) + // Storage: Society Members (r:2 w:1) + // Storage: Society SuspendedMembers (r:1 w:0) + fn vouch() -> Weight { + Weight::zero() + } + // Storage: Society Bids (r:1 w:1) + // Storage: Society Members (r:1 w:1) + fn unvouch() -> Weight { + Weight::zero() + } + // Storage: Society Candidates (r:1 w:1) + // Storage: Society Members (r:1 w:0) + // Storage: Society Votes (r:1 w:1) + fn vote() -> Weight { + Weight::zero() + } + // Storage: Society Defending (r:1 w:1) + // Storage: Society Members (r:1 w:0) + // Storage: Society ChallengeRoundCount (r:1 w:0) + // Storage: Society DefenderVotes (r:1 w:1) + fn 
defender_vote() -> Weight { + Weight::zero() + } + // Storage: Society Members (r:1 w:0) + // Storage: Society Payouts (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn payout() -> Weight { + Weight::zero() + } + // Storage: Society Members (r:1 w:1) + // Storage: Society Payouts (r:1 w:1) + fn waive_repay() -> Weight { + Weight::zero() + } + // Storage: Society Head (r:1 w:1) + // Storage: Society MemberCount (r:1 w:1) + // Storage: Society MemberByIndex (r:0 w:1) + // Storage: Society Founder (r:0 w:1) + // Storage: Society Rules (r:0 w:1) + // Storage: Society Members (r:0 w:1) + // Storage: Society Parameters (r:0 w:1) + fn found_society() -> Weight { + Weight::zero() + } + // Storage: Society Founder (r:1 w:1) + // Storage: Society MemberCount (r:1 w:1) + // Storage: Society Head (r:0 w:1) + // Storage: Society Defending (r:0 w:1) + // Storage: Society ChallengeRoundCount (r:0 w:1) + // Storage: Society MemberByIndex (r:0 w:5) + // Storage: Society Skeptic (r:0 w:1) + // Storage: Society Candidates (r:0 w:4) + // Storage: Society Pot (r:0 w:1) + // Storage: Society Rules (r:0 w:1) + // Storage: Society Votes (r:0 w:4) + // Storage: Society Members (r:0 w:5) + // Storage: Society RoundCount (r:0 w:1) + // Storage: Society Bids (r:0 w:1) + // Storage: Society Parameters (r:0 w:1) + // Storage: Society NextHead (r:0 w:1) + fn dissolve() -> Weight { + Weight::zero() + } + // Storage: Society Founder (r:1 w:0) + // Storage: Society SuspendedMembers (r:1 w:1) + // Storage: Society Payouts (r:1 w:0) + // Storage: Society Pot (r:1 w:1) + fn judge_suspended_member() -> Weight { + Weight::zero() + } + // Storage: Society Founder (r:1 w:0) + // Storage: Society MemberCount (r:1 w:0) + // Storage: Society Parameters (r:0 w:1) + fn set_parameters() -> Weight { + Weight::zero() + } + // Storage: Society Candidates (r:1 w:1) + // Storage: Society RoundCount (r:1 w:0) + // Storage: Society Skeptic (r:1 w:0) + // Storage: Society Votes (r:1 w:0) + // Storage: Society 
Members (r:1 w:1) + // Storage: Society Parameters (r:1 w:0) + fn punish_skeptic() -> Weight { + Weight::zero() + } + // Storage: Society Candidates (r:1 w:1) + // Storage: Society RoundCount (r:1 w:0) + // Storage: Society Parameters (r:1 w:0) + // Storage: Society MemberCount (r:1 w:1) + // Storage: Society NextHead (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Society MemberByIndex (r:0 w:1) + // Storage: Society Members (r:0 w:1) + fn claim_membership() -> Weight { + Weight::zero() + } + // Storage: Society Founder (r:1 w:0) + // Storage: Society Candidates (r:1 w:1) + // Storage: Society RoundCount (r:1 w:0) + // Storage: Society Parameters (r:1 w:0) + // Storage: Society MemberCount (r:1 w:1) + // Storage: Society NextHead (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Society MemberByIndex (r:0 w:1) + // Storage: Society Members (r:0 w:1) + fn bestow_membership() -> Weight { + Weight::zero() + } + // Storage: Society Founder (r:1 w:0) + // Storage: Society Candidates (r:1 w:1) + // Storage: Society RoundCount (r:1 w:0) + fn kick_candidate() -> Weight { + Weight::zero() + } + // Storage: Society Candidates (r:1 w:1) + // Storage: Society RoundCount (r:1 w:0) + fn resign_candidacy() -> Weight { + Weight::zero() + } + // Storage: Society Candidates (r:1 w:1) + // Storage: Society RoundCount (r:1 w:0) + fn drop_candidate() -> Weight { + Weight::zero() + } + // Storage: Society Candidates (r:1 w:0) + // Storage: Society VoteClearCursor (r:1 w:0) + // Storage: Society Votes (r:0 w:2) + fn cleanup_candidacy() -> Weight { + Weight::zero() + } + // Storage: Society ChallengeRoundCount (r:1 w:0) + // Storage: Society DefenderVotes (r:0 w:1) + fn cleanup_challenge() -> Weight { + Weight::zero() + } +} diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index c20f003108d76..19a437ce4ccfc 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -13,22 +13,22 @@ readme = "README.md" targets = 
["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.136", optional = true } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +serde = { version = "1.0.163", default-features = false, features = ["alloc", "derive"]} +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } +scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime", features = ["serde"] } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking", features = ["serde"] } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-session = { version = "4.0.0-dev", default-features = false, features = [ "historical", ], path = "../session" } pallet-authorship = { version = "4.0.0-dev", default-features = false, path = "../authorship" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = 
"../../primitives/application-crypto", features = ["serde"] } frame-election-provider-support = { version = "4.0.0-dev", default-features = false, path = "../election-provider-support" } log = { version = "0.4.17", default-features = false } @@ -37,8 +37,8 @@ frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = " rand_chacha = { version = "0.2", default-features = false, optional = true } [dev-dependencies] -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } sp-npos-elections = { version = "4.0.0-dev", path = "../../primitives/npos-elections" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } @@ -53,7 +53,7 @@ rand_chacha = { version = "0.2" } default = ["std"] std = [ "frame-benchmarking?/std", - "serde", + "serde/std", "codec/std", "scale-info/std", "sp-std/std", @@ -67,11 +67,33 @@ std = [ "sp-application-crypto/std", "log/std", "frame-election-provider-support/std", + "pallet-bags-list/std", + "pallet-balances/std", + "pallet-timestamp/std", + "sp-core/std", + "sp-npos-elections/std", + "sp-tracing/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-election-provider-support/runtime-benchmarks", "rand_chacha", "sp-staking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-bags-list/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-election-provider-support/try-runtime", + "frame-system/try-runtime", + "pallet-authorship/try-runtime", + "pallet-bags-list/try-runtime", + "pallet-balances/try-runtime", + 
"pallet-session/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime", "frame-election-provider-support/try-runtime"] diff --git a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml index 4a97d20a5f0af..b98ab8caef311 100644 --- a/frame/staking/reward-curve/Cargo.toml +++ b/frame/staking/reward-curve/Cargo.toml @@ -17,8 +17,8 @@ proc-macro = true [dependencies] proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" -quote = "1.0.26" -syn = { version = "2.0.14", features = ["full", "visit"] } +quote = "1.0.28" +syn = { version = "2.0.16", features = ["full", "visit"] } [dev-dependencies] -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } diff --git a/frame/staking/reward-fn/Cargo.toml b/frame/staking/reward-fn/Cargo.toml index 0fb034a17202b..3561eeb8a90c2 100644 --- a/frame/staking/reward-fn/Cargo.toml +++ b/frame/staking/reward-fn/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { version = "0.4.17", default-features = false } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../../primitives/arithmetic" } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../../../primitives/arithmetic" } [features] default = ["std"] diff --git a/frame/staking/reward-fn/src/lib.rs b/frame/staking/reward-fn/src/lib.rs index 2c7f4613b466e..d34a534c0425d 100644 --- a/frame/staking/reward-fn/src/lib.rs +++ b/frame/staking/reward-fn/src/lib.rs @@ -41,7 +41,7 @@ use sp_arithmetic::{ /// The result is meant to be scaled with minimum inflation and maximum inflation. 
/// /// (as detailed -/// [here](https://research.web3.foundation/en/latest/polkadot/economics/1-token-economics.html#inflation-model-with-parachains)) +/// [here](https://research.web3.foundation/Polkadot/overview/token-economics#inflation-model-with-parachains)) /// /// Arguments are: /// * `stake`: The fraction of total issued tokens that actively staked behind validators. Known as diff --git a/frame/staking/runtime-api/Cargo.toml b/frame/staking/runtime-api/Cargo.toml index 9923b881c38d8..5c9af0ad3cbe8 100644 --- a/frame/staking/runtime-api/Cargo.toml +++ b/frame/staking/runtime-api/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } [features] diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 53589ecfe4dbc..e72a9baf044fe 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -22,17 +22,17 @@ use crate::{ConfigOp, Pallet as Staking}; use testing_utils::*; use codec::Decode; -use frame_election_provider_support::SortedListProvider; +use frame_election_provider_support::{bounds::DataProviderBounds, SortedListProvider}; use frame_support::{ dispatch::UnfilteredDispatchable, pallet_prelude::*, - traits::{Currency, CurrencyToVote, Get, Imbalance}, + traits::{Currency, Get, Imbalance}, }; use sp_runtime::{ traits::{Bounded, One, StaticLookup, TrailingZeroInput, Zero}, Perbill, Percent, }; -use sp_staking::SessionIndex; +use sp_staking::{currency_to_vote::CurrencyToVote, SessionIndex}; use sp_std::prelude::*; pub use frame_benchmarking::v1::{ @@ -338,7 +338,7 @@ benchmarks! 
{ validate { let (stash, controller) = create_stash_controller::( - T::MaxNominations::get() - 1, + MaxNominationsOf::::get() - 1, 100, Default::default(), )?; @@ -362,11 +362,11 @@ benchmarks! { // these are the other validators; there are `T::MaxNominations::get() - 1` of them, so // there are a total of `T::MaxNominations::get()` validators in the system. - let rest_of_validators = create_validators_with_seed::(T::MaxNominations::get() - 1, 100, 415)?; + let rest_of_validators = create_validators_with_seed::(MaxNominationsOf::::get() - 1, 100, 415)?; // this is the validator that will be kicking. let (stash, controller) = create_stash_controller::( - T::MaxNominations::get() - 1, + MaxNominationsOf::::get() - 1, 100, Default::default(), )?; @@ -381,7 +381,7 @@ benchmarks! { for i in 0 .. k { // create a nominator stash. let (n_stash, n_controller) = create_stash_controller::( - T::MaxNominations::get() + i, + MaxNominationsOf::::get() + i, 100, Default::default(), )?; @@ -418,7 +418,7 @@ benchmarks! { // Worst case scenario, T::MaxNominations::get() nominate { - let n in 1 .. T::MaxNominations::get(); + let n in 1 .. MaxNominationsOf::::get(); // clean up any existing state. clear_validators_and_nominators::(); @@ -429,7 +429,7 @@ benchmarks! { // we are just doing an insert into the origin position. let scenario = ListScenario::::new(origin_weight, true)?; let (stash, controller) = create_stash_controller_with_balance::( - SEED + T::MaxNominations::get() + 1, // make sure the account does not conflict with others + SEED + MaxNominationsOf::::get() + 1, // make sure the account does not conflict with others origin_weight, Default::default(), ).unwrap(); @@ -711,7 +711,7 @@ benchmarks! { create_validators_with_nominators_for_era::( v, n, - ::MaxNominations::get() as usize, + MaxNominationsOf::::get() as usize, false, None, )?; @@ -729,7 +729,7 @@ benchmarks! 
{ create_validators_with_nominators_for_era::( v, n, - ::MaxNominations::get() as usize, + MaxNominationsOf::::get() as usize, false, None, )?; @@ -808,7 +808,7 @@ benchmarks! { let n in (MaxNominators::::get() / 2) .. MaxNominators::::get(); let validators = create_validators_with_nominators_for_era::( - v, n, T::MaxNominations::get() as usize, false, None + v, n, MaxNominationsOf::::get() as usize, false, None )? .into_iter() .map(|v| T::Lookup::lookup(v).unwrap()) @@ -819,7 +819,8 @@ benchmarks! { let num_voters = (v + n) as usize; }: { - let voters = >::get_npos_voters(None); + // default bounds are unbounded. + let voters = >::get_npos_voters(DataProviderBounds::default()); assert_eq!(voters.len(), num_voters); } @@ -830,10 +831,11 @@ benchmarks! { let n = MaxNominators::::get(); let _ = create_validators_with_nominators_for_era::( - v, n, T::MaxNominations::get() as usize, false, None + v, n, MaxNominationsOf::::get() as usize, false, None )?; }: { - let targets = >::get_npos_targets(None); + // default bounds are unbounded. + let targets = >::get_npos_targets(DataProviderBounds::default()); assert_eq!(targets.len() as u32, v); } @@ -961,7 +963,7 @@ mod tests { create_validators_with_nominators_for_era::( v, n, - ::MaxNominations::get() as usize, + MaxNominationsOf::::get() as usize, false, None, ) diff --git a/frame/staking/src/election_size_tracker.rs b/frame/staking/src/election_size_tracker.rs new file mode 100644 index 0000000000000..283ae0140ee68 --- /dev/null +++ b/frame/staking/src/election_size_tracker.rs @@ -0,0 +1,259 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! ## A static size tracker for the election snapshot data. +//! +//! ### Overview +//! +//! The goal of the size tracker is to provide a static, no-allocation byte tracker to be +//! used by the election data provider when preparing the results of +//! [`ElectionDataProvider::electing_voters`]. The [`StaticTracker`] implementation uses +//! [`codec::Encode::size_hint`] to estimate the SCALE encoded size of the snapshot voters struct +//! as it is being constructed without requiring extra stack allocations. +//! +//! The [`StaticTracker::try_register_voter`] is called to update the static tracker internal +//! state, if It will return an error if the resulting SCALE encoded size (in bytes) is larger than +//! the provided `DataProviderBounds`. +//! +//! ### Example +//! +//! ```ignore +//! use pallet_staking::election_size_tracker::*; +//! +//! // instantiates a new tracker. +//! let mut size_tracker = StaticTracker::::default(); +//! +//! let voter_bounds = ElectionBoundsBuilder::default().voter_size(1_00.into()).build().voters; +//! +//! let mut sorted_voters = T::VoterList.iter(); +//! let mut selected_voters = vec![]; +//! +//! // fit as many voters in the vec as the bounds permit. +//! for v in sorted_voters { +//! let voter = (v, weight_of(&v), targets_of(&v)); +//! if size_tracker.try_register_voter(&voter, &voter_bounds).is_err() { +//! // voter bounds size exhausted +//! break; +//! } +//! selected_voters.push(voter); +//! } +//! +//! // The SCALE encoded size in bytes of `selected_voters` is guaranteed to be below +//! 
// `voter_bounds`. +//! debug_assert!( +//! selected_voters.encoded_size() <= +//! SizeTracker::::final_byte_size_of(size_tracker.num_voters, size_tracker.size) +//! ); +//! ``` +//! +//! ### Implementation Details +//! +//! The current implementation of the static tracker is tightly coupled with the staking pallet +//! implementation, namely the representation of a voter ([`VoterOf`]). The SCALE encoded byte size +//! is calculated using [`Encode::size_hint`] of each type in the voter tuple. Each voter's byte +//! size is the sum of: +//! - 1 * [`Encode::size_hint`] of the `AccountId` type; +//! - 1 * [`Encode::size_hint`] of the `VoteWeight` type; +//! - `num_votes` * [`Encode::size_hint`] of the `AccountId` type. + +use codec::Encode; +use frame_election_provider_support::{ + bounds::{DataProviderBounds, SizeBound}, + ElectionDataProvider, VoterOf, +}; + +/// Keeps track of the SCALE encoded byte length of the snapshot's voters or targets. +/// +/// The tracker calculates the bytes used based on static rules, without requiring any actual +/// encoding or extra allocations. +#[derive(Clone, Copy, Debug)] +pub struct StaticTracker { + pub size: usize, + pub counter: usize, + _marker: sp_std::marker::PhantomData, +} + +impl Default for StaticTracker { + fn default() -> Self { + Self { size: 0, counter: 0, _marker: Default::default() } + } +} + +impl StaticTracker +where + DataProvider: ElectionDataProvider, +{ + /// Tries to register a new voter. + /// + /// If the new voter exhausts the provided bounds, return an error. Otherwise, the internal + /// state of the tracker is updated with the new registered voter. 
+ pub fn try_register_voter( + &mut self, + voter: &VoterOf, + bounds: &DataProviderBounds, + ) -> Result<(), ()> { + let tracker_size_after = { + let voter_hint = Self::voter_size_hint(voter); + Self::final_byte_size_of(self.counter + 1, self.size.saturating_add(voter_hint)) + }; + + match bounds.size_exhausted(SizeBound(tracker_size_after as u32)) { + true => Err(()), + false => { + self.size = tracker_size_after; + self.counter += 1; + Ok(()) + }, + } + } + + /// Calculates the size of the voter to register based on [`Encode::size_hint`]. + fn voter_size_hint(voter: &VoterOf) -> usize { + let (voter_account, vote_weight, targets) = voter; + + voter_account + .size_hint() + .saturating_add(vote_weight.size_hint()) + .saturating_add(voter_account.size_hint().saturating_mul(targets.len())) + } + + /// Tries to register a new target. + /// + /// If the new target exhausts the provided bounds, return an error. Otherwise, the internal + /// state of the tracker is updated with the new registered target. + pub fn try_register_target( + &mut self, + target: DataProvider::AccountId, + bounds: &DataProviderBounds, + ) -> Result<(), ()> { + let tracker_size_after = Self::final_byte_size_of( + self.counter + 1, + self.size.saturating_add(target.size_hint()), + ); + + match bounds.size_exhausted(SizeBound(tracker_size_after as u32)) { + true => Err(()), + false => { + self.size = tracker_size_after; + self.counter += 1; + Ok(()) + }, + } + } + + /// Size of the SCALE encoded prefix with a given length. + #[inline] + fn length_prefix(len: usize) -> usize { + use codec::{Compact, CompactLen}; + Compact::::compact_len(&(len as u32)) + } + + /// Calculates the final size in bytes of the SCALE encoded snapshot voter struct. 
+ fn final_byte_size_of(num_voters: usize, size: usize) -> usize { + Self::length_prefix(num_voters).saturating_add(size) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + mock::{AccountId, Staking, Test}, + BoundedVec, MaxNominationsOf, + }; + use frame_election_provider_support::bounds::ElectionBoundsBuilder; + use sp_core::bounded_vec; + + type Voters = BoundedVec>; + + #[test] + pub fn election_size_tracker_works() { + let mut voters: Vec<(u64, u64, Voters)> = vec![]; + let mut size_tracker = StaticTracker::::default(); + let voter_bounds = ElectionBoundsBuilder::default().voters_size(1_50.into()).build().voters; + + // register 1 voter with 1 vote. + let voter = (1, 10, bounded_vec![2]); + assert!(size_tracker.try_register_voter(&voter, &voter_bounds).is_ok()); + voters.push(voter); + + assert_eq!( + StaticTracker::::final_byte_size_of(size_tracker.counter, size_tracker.size), + voters.encoded_size() + ); + + // register another voter, now with 3 votes. + let voter = (2, 20, bounded_vec![3, 4, 5]); + assert!(size_tracker.try_register_voter(&voter, &voter_bounds).is_ok()); + voters.push(voter); + + assert_eq!( + StaticTracker::::final_byte_size_of(size_tracker.counter, size_tracker.size), + voters.encoded_size() + ); + + // register noop vote (unlikely to happen). 
+ let voter = (3, 30, bounded_vec![]); + assert!(size_tracker.try_register_voter(&voter, &voter_bounds).is_ok()); + voters.push(voter); + + assert_eq!( + StaticTracker::::final_byte_size_of(size_tracker.counter, size_tracker.size), + voters.encoded_size() + ); + } + + #[test] + pub fn election_size_tracker_bounds_works() { + let mut voters: Vec<(u64, u64, Voters)> = vec![]; + let mut size_tracker = StaticTracker::::default(); + let voter_bounds = ElectionBoundsBuilder::default().voters_size(1_00.into()).build().voters; + + let voter = (1, 10, bounded_vec![2]); + assert!(size_tracker.try_register_voter(&voter, &voter_bounds).is_ok()); + voters.push(voter); + + assert_eq!( + StaticTracker::::final_byte_size_of(size_tracker.counter, size_tracker.size), + voters.encoded_size() + ); + + assert!(size_tracker.size > 0 && size_tracker.size < 1_00); + let size_before_overflow = size_tracker.size; + + // try many voters that will overflow the tracker's buffer. + let voter = (2, 10, bounded_vec![2, 3, 4, 5, 6, 7, 8, 9]); + voters.push(voter.clone()); + + assert!(size_tracker.try_register_voter(&voter, &voter_bounds).is_err()); + assert!(size_tracker.size > 0 && size_tracker.size < 1_00); + + // size of the tracker did not update when trying to register votes failed. + assert_eq!(size_tracker.size, size_before_overflow); + } + + #[test] + fn len_prefix_works() { + let length_samples = + vec![0usize, 1, 62, 63, 64, 16383, 16384, 16385, 1073741822, 1073741823, 1073741824]; + + for s in length_samples { + // the encoded size of a vector of n bytes should be n + the length prefix + assert_eq!(vec![1u8; s].encoded_size(), StaticTracker::::length_prefix(s) + s); + } + } +} diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index c87aeb681a226..e59b2a3324a62 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -159,7 +159,7 @@ //! ``` //! use pallet_staking::{self as staking}; //! -//! #[frame_support::pallet] +//! 
#[frame_support::pallet(dev_mode)] //! pub mod pallet { //! use super::*; //! use frame_support::pallet_prelude::*; @@ -292,6 +292,7 @@ pub(crate) mod mock; #[cfg(test)] mod tests; +pub mod election_size_tracker; pub mod inflation; pub mod migrations; pub mod slashing; @@ -301,7 +302,7 @@ mod pallet; use codec::{Decode, Encode, HasCompact, MaxEncodedLen}; use frame_support::{ - traits::{Currency, Defensive, Get}, + traits::{ConstU32, Currency, Defensive, Get}, weights::Weight, BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; @@ -314,12 +315,12 @@ use sp_runtime::{ pub use sp_staking::StakerStatus; use sp_staking::{ offence::{Offence, OffenceError, ReportOffence}, - EraIndex, SessionIndex, + EraIndex, OnStakingUpdate, SessionIndex, }; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; pub use weights::WeightInfo; -pub use pallet::{pallet::*, *}; +pub use pallet::{pallet::*, UseNominatorsAndValidatorsMap, UseValidatorsMap}; pub(crate) const LOG_TARGET: &str = "runtime::staking"; @@ -338,6 +339,10 @@ macro_rules! log { /// pallet. pub type MaxWinnersOf = <::ElectionProvider as frame_election_provider_support::ElectionProviderBase>::MaxWinners; +/// Maximum number of nominations per nominator. +pub type MaxNominationsOf = + <::NominationsQuota as NominationsQuota>>::MaxNominations; + /// Counter for the number of "reward" points earned by a given validator. pub type RewardPoint = u32; @@ -549,7 +554,7 @@ impl StakingLedger { /// /// `slash_era` is the era in which the slash (which is being enacted now) actually happened. /// - /// This calls `Config::OnStakerSlash::on_slash` with information as to how the slash was + /// This calls `Config::OnStakingUpdate::on_slash` with information as to how the slash was /// applied. 
pub fn slash( &mut self, @@ -562,7 +567,6 @@ impl StakingLedger { } use sp_runtime::PerThing as _; - use sp_staking::OnStakerSlash as _; let mut remaining_slash = slash_amount; let pre_slash_total = self.total; @@ -667,7 +671,7 @@ impl StakingLedger { // clean unlocking chunks that are set to zero. self.unlocking.retain(|c| !c.value.is_zero()); - T::OnStakerSlash::on_slash(&self.stash, self.active, &slashed_unlocking); + T::EventListeners::on_slash(&self.stash, self.active, &slashed_unlocking); pre_slash_total.saturating_sub(self.total) } } @@ -680,7 +684,7 @@ impl StakingLedger { #[scale_info(skip_type_params(T))] pub struct Nominations { /// The targets of nomination. - pub targets: BoundedVec, + pub targets: BoundedVec>, /// The era the nominations were submitted. /// /// Except for initial nominations which are considered submitted at era 0. @@ -750,6 +754,36 @@ impl UnappliedSlash { } } +/// Something that defines the maximum number of nominations per nominator based on a curve. +/// +/// The method `curve` implements the nomination quota curve and should not be used directly. +/// However, `get_quota` returns the bounded maximum number of nominations based on `fn curve` and +/// the nominator's balance. +pub trait NominationsQuota { + /// Strict maximum number of nominations that caps the nominations curve. This value can be + /// used as the upper bound of the number of votes per nominator. + type MaxNominations: Get; + + /// Returns the voter's nomination quota within reasonable bounds [`min`, `max`], where `min` + /// is 1 and `max` is `Self::MaxNominations`. + fn get_quota(balance: Balance) -> u32 { + Self::curve(balance).clamp(1, Self::MaxNominations::get()) + } + + /// Returns the voter's nomination quota based on its balance and a curve. + fn curve(balance: Balance) -> u32; +} + +/// A nomination quota that allows up to MAX nominations for all validators. 
+pub struct FixedNominationsQuota; +impl NominationsQuota for FixedNominationsQuota { + type MaxNominations = ConstU32; + + fn curve(_: Balance) -> u32 { + MAX + } +} + /// Means for interacting with a specialized version of the `session` trait. /// /// This is needed because `Staking` sets the `ValidatorIdOf` of the `pallet_session::Config` @@ -849,8 +883,19 @@ impl(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV13 { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { frame_support::ensure!( StorageVersion::::get() == ObsoleteReleases::V12_0_0, "Required v12 before upgrading to v13" @@ -84,7 +89,7 @@ pub mod v13 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { frame_support::ensure!( Pallet::::on_chain_storage_version() == 13, "v13 not applied" @@ -114,7 +119,7 @@ pub mod v12 { pub struct MigrateToV12(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV12 { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { frame_support::ensure!( StorageVersion::::get() == ObsoleteReleases::V11_0_0, "Expected v11 before upgrading to v12" @@ -146,7 +151,7 @@ pub mod v12 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { frame_support::ensure!( StorageVersion::::get() == ObsoleteReleases::V12_0_0, "v12 not applied" @@ -170,7 +175,7 @@ pub mod v11 { for MigrateToV11 { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { frame_support::ensure!( StorageVersion::::get() == ObsoleteReleases::V10_0_0, "must upgrade linearly" @@ -217,7 +222,7 @@ pub mod v11 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: 
Vec) -> Result<(), &'static str> { + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { frame_support::ensure!( StorageVersion::::get() == ObsoleteReleases::V11_0_0, "wrong version after the upgrade" @@ -332,7 +337,7 @@ pub mod v9 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { frame_support::ensure!( StorageVersion::::get() == ObsoleteReleases::V8_0_0, "must upgrade linearly" @@ -343,17 +348,21 @@ pub mod v9 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(prev_count: Vec) -> Result<(), &'static str> { + fn post_upgrade(prev_count: Vec) -> Result<(), TryRuntimeError> { let prev_count: u32 = Decode::decode(&mut prev_count.as_slice()).expect( "the state parameter should be something that was generated by pre_upgrade", ); let post_count = T::VoterList::count(); let validators = Validators::::count(); - assert!(post_count == prev_count + validators); + ensure!( + post_count == prev_count + validators, + "`VoterList` count after the migration must equal to the sum of \ + previous count and the current number of validators" + ); frame_support::ensure!( StorageVersion::::get() == ObsoleteReleases::V9_0_0, - "must upgrade " + "must upgrade" ); Ok(()) } diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 98b58010a2434..cf08f8be1f27d 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -18,12 +18,15 @@ //! 
Test utilities use crate::{self as pallet_staking, *}; -use frame_election_provider_support::{onchain, SequentialPhragmen, VoteWeight}; +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, SequentialPhragmen, VoteWeight, +}; use frame_support::{ assert_ok, ord_parameter_types, parameter_types, traits::{ - ConstU32, ConstU64, Currency, EitherOfDiverse, FindAuthor, GenesisBuild, Get, Hooks, - Imbalance, OnUnbalanced, OneSessionHandler, + ConstU32, ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, + OnUnbalanced, OneSessionHandler, }, weights::constants::RocksDbWeight, }; @@ -32,8 +35,9 @@ use sp_core::H256; use sp_io; use sp_runtime::{ curve::PiecewiseLinear, - testing::{Header, UintAuthorityId}, + testing::UintAuthorityId, traits::{IdentityLookup, Zero}, + BuildStorage, }; use sp_staking::offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}; @@ -42,7 +46,7 @@ pub const BLOCK_TIME: u64 = 1000; /// The AccountId alias in this test module. 
pub(crate) type AccountId = u64; -pub(crate) type AccountIndex = u64; +pub(crate) type Nonce = u64; pub(crate) type BlockNumber = u64; pub(crate) type Balance = u128; @@ -82,14 +86,10 @@ pub fn is_disabled(controller: AccountId) -> bool { Session::disabled_validators().contains(&validator_index) } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, Authorship: pallet_authorship, @@ -127,14 +127,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = RocksDbWeight; type RuntimeOrigin = RuntimeOrigin; - type Index = AccountIndex; - type BlockNumber = BlockNumber; + type Nonce = Nonce; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU64<250>; type Version = (); @@ -159,7 +158,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -232,12 +231,12 @@ const THRESHOLDS: [sp_npos_elections::VoteWeight; 9] = parameter_types! 
{ pub static BagThresholds: &'static [sp_npos_elections::VoteWeight] = &THRESHOLDS; - pub static MaxNominations: u32 = 16; pub static HistoryDepth: u32 = 80; pub static MaxUnlockingChunks: u32 = 32; pub static RewardOnUnbalanceWasCalled: bool = false; - pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); pub static MaxWinners: u32 = 100; + pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); + pub static AbsoluteMaxNominations: u32 = 16; } type VoterBagsListInstance = pallet_bags_list::Instance1; @@ -257,8 +256,7 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = (); type MaxWinners = MaxWinners; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; + type Bounds = ElectionsBounds; } pub struct MockReward {} @@ -268,8 +266,14 @@ impl OnUnbalanced> for MockReward { } } -pub struct OnStakerSlashMock(core::marker::PhantomData); -impl sp_staking::OnStakerSlash for OnStakerSlashMock { +parameter_types! { + pub static LedgerSlashPerEra: + (BalanceOf, BTreeMap>) = + (Zero::zero(), BTreeMap::new()); +} + +pub struct EventListenerMock; +impl OnStakingUpdate for EventListenerMock { fn on_slash( _pool_account: &AccountId, slashed_bonded: Balance, @@ -280,11 +284,10 @@ impl sp_staking::OnStakerSlash for OnStakerSlashM } impl crate::pallet::pallet::Config for Test { - type MaxNominations = MaxNominations; type Currency = Balances; type CurrencyBalance = ::Balance; type UnixTime = Timestamp; - type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type CurrencyToVote = (); type RewardRemainder = RewardRemainderMock; type RuntimeEvent = RuntimeEvent; type Slash = (); @@ -303,13 +306,33 @@ impl crate::pallet::pallet::Config for Test { // NOTE: consider a macro and use `UseNominatorsAndValidatorsMap` as well. 
type VoterList = VoterBagsList; type TargetList = UseValidatorsMap; + type NominationsQuota = WeightedNominationsQuota<16>; type MaxUnlockingChunks = MaxUnlockingChunks; type HistoryDepth = HistoryDepth; - type OnStakerSlash = OnStakerSlashMock; + type EventListeners = EventListenerMock; type BenchmarkingConfig = TestBenchmarkingConfig; type WeightInfo = (); } +pub struct WeightedNominationsQuota; +impl NominationsQuota for WeightedNominationsQuota +where + u128: From, +{ + type MaxNominations = AbsoluteMaxNominations; + + fn curve(balance: Balance) -> u32 { + match balance.into() { + // random curve for testing. + 0..=110 => MAX, + 111 => 0, + 222 => 2, + 333 => MAX + 10, + _ => MAX, + } + } +} + pub(crate) type StakingCall = crate::Call; pub(crate) type TestCall = ::RuntimeCall; @@ -424,7 +447,7 @@ impl ExtBuilder { } fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); - let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); let _ = pallet_balances::GenesisConfig:: { balances: vec![ diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 82a0956da7b61..e0f5c95587818 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -18,16 +18,17 @@ //! Implementations for the Staking FRAME Pallet. 
use frame_election_provider_support::{ - data_provider, BoundedSupportsOf, ElectionDataProvider, ElectionProvider, ScoreProvider, - SortedListProvider, VoteWeight, VoterOf, + bounds::{CountBound, SizeBound}, + data_provider, BoundedSupportsOf, DataProviderBounds, ElectionDataProvider, ElectionProvider, + ScoreProvider, SortedListProvider, VoteWeight, VoterOf, }; use frame_support::{ defensive, dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ - Currency, CurrencyToVote, Defensive, DefensiveResult, EstimateNextNewSession, Get, - Imbalance, LockableCurrency, OnUnbalanced, TryCollect, UnixTime, WithdrawReasons, + Currency, Defensive, DefensiveResult, EstimateNextNewSession, Get, Imbalance, + LockableCurrency, OnUnbalanced, TryCollect, UnixTime, WithdrawReasons, }, weights::Weight, }; @@ -38,19 +39,26 @@ use sp_runtime::{ Perbill, }; use sp_staking::{ + currency_to_vote::CurrencyToVote, offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, EraIndex, SessionIndex, Stake, StakingInterface, }; use sp_std::prelude::*; use crate::{ - log, slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraPayout, Exposure, ExposureOf, - Forcing, IndividualExposure, MaxWinnersOf, Nominations, PositiveImbalanceOf, RewardDestination, + election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo, + BalanceOf, EraPayout, Exposure, ExposureOf, Forcing, IndividualExposure, MaxNominationsOf, + MaxWinnersOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, }; use super::{pallet::*, STAKING_ID}; +#[cfg(feature = "try-runtime")] +use frame_support::ensure; +#[cfg(any(test, feature = "try-runtime"))] +use sp_runtime::TryRuntimeError; + /// The maximum number of iterations that we do whilst iterating over `T::VoterList` in /// `get_npos_voters`. /// @@ -755,13 +763,15 @@ impl Pallet { /// nominators. /// /// This function is self-weighing as [`DispatchClass::Mandatory`]. 
- pub fn get_npos_voters(maybe_max_len: Option) -> Vec> { - let max_allowed_len = { - let all_voter_count = T::VoterList::count() as usize; - maybe_max_len.unwrap_or(all_voter_count).min(all_voter_count) + pub fn get_npos_voters(bounds: DataProviderBounds) -> Vec> { + let mut voters_size_tracker: StaticTracker = StaticTracker::default(); + + let final_predicted_len = { + let all_voter_count = T::VoterList::count(); + bounds.count.unwrap_or(all_voter_count.into()).min(all_voter_count.into()).0 }; - let mut all_voters = Vec::<_>::with_capacity(max_allowed_len); + let mut all_voters = Vec::<_>::with_capacity(final_predicted_len as usize); // cache a few things. let weight_of = Self::weight_of_fn(); @@ -772,8 +782,8 @@ impl Pallet { let mut min_active_stake = u64::MAX; let mut sorted_voters = T::VoterList::iter(); - while all_voters.len() < max_allowed_len && - voters_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * max_allowed_len as u32) + while all_voters.len() < final_predicted_len as usize && + voters_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * final_predicted_len as u32) { let voter = match sorted_voters.next() { Some(voter) => { @@ -783,13 +793,32 @@ impl Pallet { None => break, }; + let voter_weight = weight_of(&voter); + // if voter weight is zero, do not consider this voter for the snapshot. + if voter_weight.is_zero() { + log!(debug, "voter's active balance is 0. skip this voter."); + continue + } + if let Some(Nominations { targets, .. }) = >::get(&voter) { - let voter_weight = weight_of(&voter); if !targets.is_empty() { - all_voters.push((voter.clone(), voter_weight, targets)); + // Note on lazy nomination quota: we do not check the nomination quota of the + // voter at this point and accept all the current nominations. The nomination + // quota is only enforced at `nominate` time. + + let voter = (voter, voter_weight, targets); + if voters_size_tracker.try_register_voter(&voter, &bounds).is_err() { + // no more space left for the election result, stop iterating. 
+ Self::deposit_event(Event::::SnapshotVotersSizeExceeded { + size: voters_size_tracker.size as u32, + }); + break + } + + all_voters.push(voter); nominators_taken.saturating_inc(); } else { - // Technically should never happen, but not much we can do about it. + // technically should never happen, but not much we can do about it. } min_active_stake = if voter_weight < min_active_stake { voter_weight } else { min_active_stake }; @@ -797,34 +826,41 @@ impl Pallet { // if this voter is a validator: let self_vote = ( voter.clone(), - weight_of(&voter), + voter_weight, vec![voter.clone()] .try_into() .expect("`MaxVotesPerVoter` must be greater than or equal to 1"), ); + + if voters_size_tracker.try_register_voter(&self_vote, &bounds).is_err() { + // no more space left for the election snapshot, stop iterating. + Self::deposit_event(Event::::SnapshotVotersSizeExceeded { + size: voters_size_tracker.size as u32, + }); + break + } all_voters.push(self_vote); validators_taken.saturating_inc(); } else { // this can only happen if: 1. there a bug in the bags-list (or whatever is the // sorted list) logic and the state of the two pallets is no longer compatible, or // because the nominators is not decodable since they have more nomination than - // `T::MaxNominations`. The latter can rarely happen, and is not really an emergency - // or bug if it does. - log!( - warn, - "DEFENSIVE: invalid item in `VoterList`: {:?}, this nominator probably has too many nominations now", - voter - ); + // `T::NominationsQuota::get_quota`. The latter can rarely happen, and is not + // really an emergency or bug if it does. + defensive!( + "DEFENSIVE: invalid item in `VoterList`: {:?}, this nominator probably has too many nominations now", + voter, + ); } } // all_voters should have not re-allocated. 
- debug_assert!(all_voters.capacity() == max_allowed_len); + debug_assert!(all_voters.capacity() == final_predicted_len as usize); Self::register_weight(T::WeightInfo::get_npos_voters(validators_taken, nominators_taken)); let min_active_stake: T::CurrencyBalance = - if all_voters.len() == 0 { 0u64.into() } else { min_active_stake.into() }; + if all_voters.is_empty() { Zero::zero() } else { min_active_stake.into() }; MinimumActiveStake::::put(min_active_stake); @@ -842,14 +878,20 @@ impl Pallet { /// Get the targets for an upcoming npos election. /// /// This function is self-weighing as [`DispatchClass::Mandatory`]. - pub fn get_npos_targets(maybe_max_len: Option) -> Vec { - let max_allowed_len = maybe_max_len.unwrap_or_else(|| T::TargetList::count() as usize); - let mut all_targets = Vec::::with_capacity(max_allowed_len); + pub fn get_npos_targets(bounds: DataProviderBounds) -> Vec { + let mut targets_size_tracker: StaticTracker = StaticTracker::default(); + + let final_predicted_len = { + let all_target_count = T::TargetList::count(); + bounds.count.unwrap_or(all_target_count.into()).min(all_target_count.into()).0 + }; + + let mut all_targets = Vec::::with_capacity(final_predicted_len as usize); let mut targets_seen = 0; let mut targets_iter = T::TargetList::iter(); - while all_targets.len() < max_allowed_len && - targets_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * max_allowed_len as u32) + while all_targets.len() < final_predicted_len as usize && + targets_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * final_predicted_len as u32) { let target = match targets_iter.next() { Some(target) => { @@ -859,6 +901,14 @@ impl Pallet { None => break, }; + if targets_size_tracker.try_register_target(target.clone(), &bounds).is_err() { + // no more space left for the election snapshot, stop iterating. 
+ Self::deposit_event(Event::::SnapshotTargetsSizeExceeded { + size: targets_size_tracker.size as u32, + }); + break + } + if Validators::::contains_key(&target) { all_targets.push(target); } @@ -977,46 +1027,51 @@ impl Pallet { /// Returns the current nominations quota for nominators. /// /// Used by the runtime API. - /// Note: for now, this api runtime will always return value of `T::MaxNominations` and thus it - /// is redundant. However, with the upcoming changes in - /// , the nominations quota will change - /// depending on the nominators balance. We're introducing this runtime API now to prepare the - /// community to use it before rolling out PR#12970. - pub fn api_nominations_quota(_balance: BalanceOf) -> u32 { - T::MaxNominations::get() + pub fn api_nominations_quota(balance: BalanceOf) -> u32 { + T::NominationsQuota::get_quota(balance) } } impl ElectionDataProvider for Pallet { type AccountId = T::AccountId; type BlockNumber = BlockNumberFor; - type MaxVotesPerVoter = T::MaxNominations; + type MaxVotesPerVoter = MaxNominationsOf; fn desired_targets() -> data_provider::Result { Self::register_weight(T::DbWeight::get().reads(1)); Ok(Self::validator_count()) } - fn electing_voters(maybe_max_len: Option) -> data_provider::Result>> { + fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result>> { // This can never fail -- if `maybe_max_len` is `Some(_)` we handle it. 
- let voters = Self::get_npos_voters(maybe_max_len); - debug_assert!(maybe_max_len.map_or(true, |max| voters.len() <= max)); + let voters = Self::get_npos_voters(bounds); + + debug_assert!(!bounds.exhausted( + SizeBound(voters.encoded_size() as u32).into(), + CountBound(voters.len() as u32).into() + )); Ok(voters) } - fn electable_targets(maybe_max_len: Option) -> data_provider::Result> { - let target_count = T::TargetList::count(); + fn electable_targets(bounds: DataProviderBounds) -> data_provider::Result> { + let targets = Self::get_npos_targets(bounds); - // We can't handle this case yet -- return an error. - if maybe_max_len.map_or(false, |max_len| target_count > max_len as u32) { + // We can't handle this case yet -- return an error. WIP to improve handling this case in + // . + if bounds.exhausted(None, CountBound(T::TargetList::count() as u32).into()) { return Err("Target snapshot too big") } - Ok(Self::get_npos_targets(None)) + debug_assert!(!bounds.exhausted( + SizeBound(targets.encoded_size() as u32).into(), + CountBound(targets.len() as u32).into() + )); + + Ok(targets) } - fn next_election_prediction(now: T::BlockNumber) -> T::BlockNumber { + fn next_election_prediction(now: BlockNumberFor) -> BlockNumberFor { let current_era = Self::current_era().unwrap_or(0); let current_session = Self::current_planned_session(); let current_era_start_session_index = @@ -1033,7 +1088,7 @@ impl ElectionDataProvider for Pallet { let session_length = T::NextNewSession::average_session_length(); - let sessions_left: T::BlockNumber = match ForceEra::::get() { + let sessions_left: BlockNumberFor = match ForceEra::::get() { Forcing::ForceNone => Bounded::max_value(), Forcing::ForceNew | Forcing::ForceAlways => Zero::zero(), Forcing::NotForcing if era_progress >= T::SessionsPerEra::get() => Zero::zero(), @@ -1232,7 +1287,7 @@ impl historical::SessionManager pallet_authorship::EventHandler for Pallet +impl pallet_authorship::EventHandler> for Pallet where T: Config + 
pallet_authorship::Config + pallet_session::Config, { @@ -1467,7 +1522,7 @@ impl SortedListProvider for UseValidatorsMap { 0 } #[cfg(feature = "try-runtime")] - fn try_state() -> Result<(), &'static str> { + fn try_state() -> Result<(), TryRuntimeError> { Ok(()) } @@ -1544,7 +1599,7 @@ impl SortedListProvider for UseNominatorsAndValidatorsM } #[cfg(feature = "try-runtime")] - fn try_state() -> Result<(), &'static str> { + fn try_state() -> Result<(), TryRuntimeError> { Ok(()) } @@ -1563,10 +1618,10 @@ impl SortedListProvider for UseNominatorsAndValidatorsM } } -// NOTE: in this entire impl block, the assumption is that `who` is a stash account. impl StakingInterface for Pallet { type AccountId = T::AccountId; type Balance = BalanceOf; + type CurrencyToVote = T::CurrencyToVote; fn minimum_nominator_bond() -> Self::Balance { MinNominatorBond::::get() @@ -1713,7 +1768,7 @@ impl StakingInterface for Pallet { #[cfg(any(test, feature = "try-runtime"))] impl Pallet { - pub(crate) fn do_try_state(_: BlockNumberFor) -> Result<(), &'static str> { + pub(crate) fn do_try_state(_: BlockNumberFor) -> Result<(), TryRuntimeError> { ensure!( T::VoterList::iter() .all(|x| >::contains_key(&x) || >::contains_key(&x)), @@ -1726,7 +1781,7 @@ impl Pallet { Self::check_count() } - fn check_count() -> Result<(), &'static str> { + fn check_count() -> Result<(), TryRuntimeError> { ensure!( ::VoterList::count() == Nominators::::count() + Validators::::count(), @@ -1739,18 +1794,19 @@ impl Pallet { ensure!( ValidatorCount::::get() <= ::MaxWinners::get(), - "validator count exceeded election max winners" + Error::::TooManyValidators ); Ok(()) } - fn check_ledgers() -> Result<(), &'static str> { + fn check_ledgers() -> Result<(), TryRuntimeError> { Bonded::::iter() .map(|(_, ctrl)| Self::ensure_ledger_consistent(ctrl)) - .collect::>() + .collect::, _>>()?; + Ok(()) } - fn check_exposures() -> Result<(), &'static str> { + fn check_exposures() -> Result<(), TryRuntimeError> { // a check per 
validator to ensure the exposure struct is always sane. let era = Self::active_era().unwrap().index; ErasStakers::::iter_prefix_values(era) @@ -1766,10 +1822,10 @@ impl Pallet { ); Ok(()) }) - .collect::>() + .collect::>() } - fn check_nominators() -> Result<(), &'static str> { + fn check_nominators() -> Result<(), TryRuntimeError> { // a check per nominator to ensure their entire stake is correctly distributed. Will only // kick-in if the nomination was submitted before the current era. let era = Self::active_era().unwrap().index; @@ -1783,27 +1839,33 @@ impl Pallet { } }, ) - .map(|nominator| { + .map(|nominator| -> Result<(), TryRuntimeError> { // must be bonded. Self::ensure_is_stash(&nominator)?; let mut sum = BalanceOf::::zero(); T::SessionInterface::validators() .iter() .map(|v| Self::eras_stakers(era, v)) - .map(|e| { + .map(|e| -> Result<(), TryRuntimeError> { let individual = e.others.iter().filter(|e| e.who == nominator).collect::>(); let len = individual.len(); match len { 0 => { /* not supporting this validator at all. */ }, 1 => sum += individual[0].value, - _ => return Err("nominator cannot back a validator more than once."), + _ => + return Err( + "nominator cannot back a validator more than once.".into() + ), }; Ok(()) }) - .collect::>() + .collect::, _>>()?; + Ok(()) }) - .collect::>() + .collect::, _>>()?; + + Ok(()) } fn ensure_is_stash(who: &T::AccountId) -> Result<(), &'static str> { @@ -1811,17 +1873,13 @@ impl Pallet { Ok(()) } - fn ensure_ledger_consistent(ctrl: T::AccountId) -> Result<(), &'static str> { + fn ensure_ledger_consistent(ctrl: T::AccountId) -> Result<(), TryRuntimeError> { // ensures ledger.total == ledger.active + sum(ledger.unlocking). 
let ledger = Self::ledger(ctrl.clone()).ok_or("Not a controller.")?; let real_total: BalanceOf = ledger.unlocking.iter().fold(ledger.active, |a, c| a + c.value); ensure!(real_total == ledger.total, "ledger.total corrupt"); - if !(ledger.active >= T::Currency::minimum_balance() || ledger.active.is_zero()) { - log!(warn, "ledger.active less than ED: {:?}, {:?}", ctrl, ledger) - } - Ok(()) } } diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 2b33573ac210f..40a2f5cf73eb1 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -24,7 +24,7 @@ use frame_support::{ dispatch::Codec, pallet_prelude::*, traits::{ - Currency, CurrencyToVote, Defensive, DefensiveResult, DefensiveSaturating, EnsureOrigin, + Currency, Defensive, DefensiveResult, DefensiveSaturating, EnsureOrigin, EstimateNextNewSession, Get, LockIdentifier, LockableCurrency, OnUnbalanced, TryCollect, UnixTime, }, @@ -45,9 +45,9 @@ pub use impls::*; use crate::{ slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout, - EraRewardPoints, Exposure, Forcing, NegativeImbalanceOf, Nominations, PositiveImbalanceOf, - RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, - ValidatorPrefs, + EraRewardPoints, Exposure, Forcing, MaxNominationsOf, NegativeImbalanceOf, Nominations, + NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, + UnappliedSlash, UnlockChunk, ValidatorPrefs, }; const STAKING_ID: LockIdentifier = *b"staking "; @@ -87,7 +87,7 @@ pub mod pallet { /// The staking balance. type Currency: LockableCurrency< Self::AccountId, - Moment = Self::BlockNumber, + Moment = BlockNumberFor, Balance = Self::CurrencyBalance, >; /// Just the `Currency::Balance` type; we have this item to allow us to constrain it to @@ -113,25 +113,24 @@ pub mod pallet { /// in 128. 
/// Consequently, the backward convert is used convert the u128s from sp-elections back to a /// [`BalanceOf`]. - type CurrencyToVote: CurrencyToVote>; + type CurrencyToVote: sp_staking::currency_to_vote::CurrencyToVote>; /// Something that provides the election functionality. type ElectionProvider: ElectionProvider< AccountId = Self::AccountId, - BlockNumber = Self::BlockNumber, + BlockNumber = BlockNumberFor, // we only accept an election provider that has staking as data provider. DataProvider = Pallet, >; /// Something that provides the election functionality at genesis. type GenesisElectionProvider: ElectionProvider< AccountId = Self::AccountId, - BlockNumber = Self::BlockNumber, + BlockNumber = BlockNumberFor, DataProvider = Pallet, >; - /// Maximum number of nominations per nominator. - #[pallet::constant] - type MaxNominations: Get; + /// Something that defines the maximum number of nominations per nominator. + type NominationsQuota: NominationsQuota>; /// Number of eras to keep in history. /// @@ -200,7 +199,7 @@ pub mod pallet { /// Something that can estimate the next session change, accurately or as a best effort /// guess. - type NextNewSession: EstimateNextNewSession; + type NextNewSession: EstimateNextNewSession>; /// The maximum number of nominators rewarded for each validator. /// @@ -261,9 +260,11 @@ pub mod pallet { #[pallet::constant] type MaxUnlockingChunks: Get; - /// A hook called when any staker is slashed. Mostly likely this can be a no-op unless - /// other pallets exist that are affected by slashing per-staker. - type OnStakerSlash: sp_staking::OnStakerSlash>; + /// Something that listens to staking updates and performs actions based on the data it + /// receives. + /// + /// WARNING: this only reports slashing events for the time being. + type EventListeners: sp_staking::OnStakingUpdate>; /// Some parameters of the benchmarking. type BenchmarkingConfig: BenchmarkingConfig; @@ -346,7 +347,8 @@ pub mod pallet { /// they wish to support. 
/// /// Note that the keys of this storage map might become non-decodable in case the - /// [`Config::MaxNominations`] configuration is decreased. In this rare case, these nominators + /// account's [`NominationsQuota::MaxNominations`] configuration is decreased. + /// In this rare case, these nominators /// are still existent in storage, their key is correct and retrievable (i.e. `contains_key` /// indicates that they exist), but their value cannot be decoded. Therefore, the non-decodable /// nominators will effectively not-exist, until they re-submit their preferences such that it @@ -596,7 +598,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { ValidatorCount::::put(self.validator_count); MinimumValidatorCount::::put(self.minimum_validator_count); @@ -694,6 +696,10 @@ pub mod pallet { PayoutStarted { era_index: EraIndex, validator_stash: T::AccountId }, /// A validator has set their preferences. ValidatorPrefsSet { stash: T::AccountId, prefs: ValidatorPrefs }, + /// Voters size limit reached. + SnapshotVotersSizeExceeded { size: u32 }, + /// Targets size limit reached. + SnapshotTargetsSizeExceeded { size: u32 }, /// A new force era mode was set. ForceEra { mode: Forcing }, } @@ -780,11 +786,11 @@ pub mod pallet { fn integrity_test() { // ensure that we funnel the correct value to the `DataProvider::MaxVotesPerVoter`; assert_eq!( - T::MaxNominations::get(), + MaxNominationsOf::::get(), ::MaxVotesPerVoter::get() ); // and that MaxNominations is always greater than 1, since we count on this. - assert!(!T::MaxNominations::get().is_zero()); + assert!(!MaxNominationsOf::::get().is_zero()); // ensure election results are always bounded with the same value assert!( @@ -792,20 +798,16 @@ pub mod pallet { ::MaxWinners::get() ); - sp_std::if_std! 
{ - sp_io::TestExternalities::new_empty().execute_with(|| - assert!( - T::SlashDeferDuration::get() < T::BondingDuration::get() || T::BondingDuration::get() == 0, - "As per documentation, slash defer duration ({}) should be less than bonding duration ({}).", - T::SlashDeferDuration::get(), - T::BondingDuration::get(), - ) - ); - } + assert!( + T::SlashDeferDuration::get() < T::BondingDuration::get() || T::BondingDuration::get() == 0, + "As per documentation, slash defer duration ({}) should be less than bonding duration ({}).", + T::SlashDeferDuration::get(), + T::BondingDuration::get(), + ) } #[cfg(feature = "try-runtime")] - fn try_state(n: BlockNumberFor) -> Result<(), &'static str> { + fn try_state(n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { Self::do_try_state(n) } } @@ -1006,9 +1008,7 @@ pub mod pallet { // Note: in case there is no current era it is fine to bond one era more. let era = Self::current_era().unwrap_or(0) + T::BondingDuration::get(); - if let Some(mut chunk) = - ledger.unlocking.last_mut().filter(|chunk| chunk.era == era) - { + if let Some(chunk) = ledger.unlocking.last_mut().filter(|chunk| chunk.era == era) { // To keep the chunk count down, we only keep one chunk per era. Since // `unlocking` is a FiFo queue, if a chunk exists for `era` we know that it will // be the last one. @@ -1042,8 +1042,8 @@ pub mod pallet { /// Remove any unlocked chunks from the `unlocking` queue from our management. /// - /// This essentially frees up that balance to be used by the stash account to do - /// whatever it wants. + /// This essentially frees up that balance to be used by the stash account to do whatever + /// it wants. /// /// The dispatch origin for this call must be _Signed_ by the controller. /// @@ -1051,6 +1051,15 @@ pub mod pallet { /// /// See also [`Call::unbond`]. 
/// + /// ## Parameters + /// + /// - `num_slashing_spans` indicates the number of metadata slashing spans to clear when + /// this call results in a complete removal of all the data related to the stash account. + /// In this case, the `num_slashing_spans` must be larger or equal to the number of + /// slashing spans associated with the stash account in the [`SlashingSpans`] storage type, + /// otherwise the call will fail. The call weight is directly propotional to + /// `num_slashing_spans`. + /// /// ## Complexity /// O(S) where S is the number of slashing spans to remove /// NOTE: Weight annotation is the kill scenario, we refund otherwise. @@ -1140,7 +1149,10 @@ pub mod pallet { } ensure!(!targets.is_empty(), Error::::EmptyTargets); - ensure!(targets.len() <= T::MaxNominations::get() as usize, Error::::TooManyTargets); + ensure!( + targets.len() <= T::NominationsQuota::get_quota(ledger.active) as usize, + Error::::TooManyTargets + ); let old = Nominators::::get(stash).map_or_else(Vec::new, |x| x.targets.into_inner()); @@ -1379,6 +1391,11 @@ pub mod pallet { /// Force a current staker to become completely unstaked, immediately. /// /// The dispatch origin must be Root. + /// + /// ## Parameters + /// + /// - `num_slashing_spans`: Refer to comments on [`Call::withdraw_unbonded`] for more + /// details. #[pallet::call_index(15)] #[pallet::weight(T::WeightInfo::force_unstake(*num_slashing_spans))] pub fn force_unstake( @@ -1519,6 +1536,11 @@ pub mod pallet { /// It can be called by anyone, as long as `stash` meets the above requirements. /// /// Refunds the transaction fees upon successful execution. + /// + /// ## Parameters + /// + /// - `num_slashing_spans`: Refer to comments on [`Call::withdraw_unbonded`] for more + /// details. 
#[pallet::call_index(20)] #[pallet::weight(T::WeightInfo::reap_stash(*num_slashing_spans))] pub fn reap_stash( diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index e3ee4cd1a8e9f..29539cbb84cf7 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -18,7 +18,10 @@ //! Tests for the module. use super::{ConfigOp, Event, *}; -use frame_election_provider_support::{ElectionProvider, SortedListProvider, Support}; +use frame_election_provider_support::{ + bounds::{DataProviderBounds, ElectionBoundsBuilder}, + ElectionProvider, SortedListProvider, Support, +}; use frame_support::{ assert_noop, assert_ok, assert_storage_noop, bounded_vec, dispatch::{extract_actual_weight, GetDispatchInfo, WithPostDispatchInfo}, @@ -4508,29 +4511,111 @@ mod election_data_provider { .add_staker(71, 71, 10, StakerStatus::::Nominator(vec![21])) .add_staker(81, 81, 50, StakerStatus::::Nominator(vec![21])) .build_and_execute(|| { - assert_ok!(::electing_voters(None)); + // default bounds are unbounded. + assert_ok!(::electing_voters( + DataProviderBounds::default() + )); assert_eq!(MinimumActiveStake::::get(), 10); // remove staker with lower bond by limiting the number of voters and check // `MinimumActiveStake` again after electing voters. - assert_ok!(::electing_voters(Some(5))); + let bounds = ElectionBoundsBuilder::default().voters_count(5.into()).build(); + assert_ok!(::electing_voters(bounds.voters)); assert_eq!(MinimumActiveStake::::get(), 50); }); } #[test] - fn set_minimum_active_stake_zero_correct() { + fn set_minimum_active_stake_lower_bond_works() { + // if there are no voters, minimum active stake is zero (should not happen). ExtBuilder::default().has_stakers(false).build_and_execute(|| { - assert_ok!(::electing_voters(None)); + // default bounds are unbounded. 
+ assert_ok!(::electing_voters( + DataProviderBounds::default() + )); + assert_eq!(::VoterList::count(), 0); assert_eq!(MinimumActiveStake::::get(), 0); }); + + // lower non-zero active stake below `MinNominatorBond` is the minimum active stake if + // it is selected as part of the npos voters. + ExtBuilder::default().has_stakers(true).nominate(true).build_and_execute(|| { + assert_eq!(MinNominatorBond::::get(), 1); + assert_eq!(::VoterList::count(), 4); + + assert_ok!(Staking::bond(RuntimeOrigin::signed(4), 5, Default::default(),)); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![1])); + assert_eq!(::VoterList::count(), 5); + + let voters_before = + ::electing_voters(DataProviderBounds::default()) + .unwrap(); + assert_eq!(MinimumActiveStake::::get(), 5); + + // update minimum nominator bond. + MinNominatorBond::::set(10); + assert_eq!(MinNominatorBond::::get(), 10); + // voter list still considers nominator 4 for voting, even though its active stake is + // lower than `MinNominatorBond`. + assert_eq!(::VoterList::count(), 5); + + let voters = + ::electing_voters(DataProviderBounds::default()) + .unwrap(); + assert_eq!(voters_before, voters); + + // minimum active stake is lower than `MinNominatorBond`. + assert_eq!(MinimumActiveStake::::get(), 5); + }); + } + + #[test] + fn set_minimum_active_bond_corrupt_state() { + ExtBuilder::default() + .has_stakers(true) + .nominate(true) + .add_staker(61, 61, 2_000, StakerStatus::::Nominator(vec![21])) + .build_and_execute(|| { + assert_eq!(Staking::weight_of(&101), 500); + let voters = ::electing_voters( + DataProviderBounds::default(), + ) + .unwrap(); + assert_eq!(voters.len(), 5); + assert_eq!(MinimumActiveStake::::get(), 500); + + assert_ok!(Staking::unbond(RuntimeOrigin::signed(101), 200)); + start_active_era(10); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(101), 100)); + start_active_era(20); + + // corrupt ledger state by lowering max unlocking chunks bounds. 
+ MaxUnlockingChunks::set(1); + + let voters = ::electing_voters( + DataProviderBounds::default(), + ) + .unwrap(); + // number of returned voters decreases since ledger entry of stash 101 is now + // corrupt. + assert_eq!(voters.len(), 4); + // minimum active stake does not take into consideration the corrupt entry. + assert_eq!(MinimumActiveStake::::get(), 2_000); + + // voter weight of corrupted ledger entry is 0. + assert_eq!(Staking::weight_of(&101), 0); + + // reset max unlocking chunks for try_state to pass. + MaxUnlockingChunks::set(32); + }) } #[test] fn voters_include_self_vote() { ExtBuilder::default().nominate(false).build_and_execute(|| { + // default bounds are unbounded. assert!(>::iter().map(|(x, _)| x).all(|v| Staking::electing_voters( - None + DataProviderBounds::default() ) .unwrap() .into_iter() @@ -4538,39 +4623,11 @@ mod election_data_provider { }) } - #[test] - fn respects_snapshot_len_limits() { - ExtBuilder::default() - .set_status(41, StakerStatus::Validator) - .build_and_execute(|| { - // sum of all nominators who'd be voters (1), plus the self-votes (4). - assert_eq!(::VoterList::count(), 5); - - // if limits is less.. - assert_eq!(Staking::electing_voters(Some(1)).unwrap().len(), 1); - - // if limit is equal.. - assert_eq!(Staking::electing_voters(Some(5)).unwrap().len(), 5); - - // if limit is more. - assert_eq!(Staking::electing_voters(Some(55)).unwrap().len(), 5); - - // if target limit is more.. - assert_eq!(Staking::electable_targets(Some(6)).unwrap().len(), 4); - assert_eq!(Staking::electable_targets(Some(4)).unwrap().len(), 4); - - // if target limit is less, then we return an error. - assert_eq!( - Staking::electable_targets(Some(1)).unwrap_err(), - "Target snapshot too big" - ); - }); - } - // Tests the criteria that in `ElectionDataProvider::voters` function, we try to get at most // `maybe_max_len` voters, and if some of them end up being skipped, we iterate at most `2 * // maybe_max_len`. 
#[test] + #[should_panic] fn only_iterates_max_2_times_max_allowed_len() { ExtBuilder::default() .nominate(false) @@ -4595,13 +4652,14 @@ mod election_data_provider { StakerStatus::::Nominator(vec![21, 22, 23, 24, 25]), ) .build_and_execute(|| { + let bounds_builder = ElectionBoundsBuilder::default(); // all voters ordered by stake, assert_eq!( ::VoterList::iter().collect::>(), vec![61, 71, 81, 11, 21, 31] ); - MaxNominations::set(2); + AbsoluteMaxNominations::set(2); // we want 2 voters now, and in maximum we allow 4 iterations. This is what happens: // 61 is pruned; @@ -4610,7 +4668,7 @@ mod election_data_provider { // 11 is taken; // we finish since the 2x limit is reached. assert_eq!( - Staking::electing_voters(Some(2)) + Staking::electing_voters(bounds_builder.voters_count(2.into()).build().voters) .unwrap() .iter() .map(|(stash, _, _)| stash) @@ -4621,6 +4679,189 @@ mod election_data_provider { }); } + #[test] + fn respects_snapshot_count_limits() { + ExtBuilder::default() + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + // sum of all nominators who'd be voters (1), plus the self-votes (4). + assert_eq!(::VoterList::count(), 5); + + let bounds_builder = ElectionBoundsBuilder::default(); + + // if voter count limit is less.. + assert_eq!( + Staking::electing_voters(bounds_builder.voters_count(1.into()).build().voters) + .unwrap() + .len(), + 1 + ); + + // if voter count limit is equal.. + assert_eq!( + Staking::electing_voters(bounds_builder.voters_count(5.into()).build().voters) + .unwrap() + .len(), + 5 + ); + + // if voter count limit is more. + assert_eq!( + Staking::electing_voters(bounds_builder.voters_count(55.into()).build().voters) + .unwrap() + .len(), + 5 + ); + + // if target count limit is more.. + assert_eq!( + Staking::electable_targets( + bounds_builder.targets_count(6.into()).build().targets + ) + .unwrap() + .len(), + 4 + ); + + // if target count limit is equal.. 
+ assert_eq!( + Staking::electable_targets( + bounds_builder.targets_count(4.into()).build().targets + ) + .unwrap() + .len(), + 4 + ); + + // if target limit count is less, then we return an error. + assert_eq!( + Staking::electable_targets( + bounds_builder.targets_count(1.into()).build().targets + ) + .unwrap_err(), + "Target snapshot too big" + ); + }); + } + + #[test] + fn respects_snapshot_size_limits() { + ExtBuilder::default().build_and_execute(|| { + // voters: set size bounds that allows only for 1 voter. + let bounds = ElectionBoundsBuilder::default().voters_size(26.into()).build(); + let elected = Staking::electing_voters(bounds.voters).unwrap(); + assert!(elected.encoded_size() == 26 as usize); + let prev_len = elected.len(); + + // larger size bounds means more quota for voters. + let bounds = ElectionBoundsBuilder::default().voters_size(100.into()).build(); + let elected = Staking::electing_voters(bounds.voters).unwrap(); + assert!(elected.encoded_size() <= 100 as usize); + assert!(elected.len() > 1 && elected.len() > prev_len); + + // targets: set size bounds that allows for only one target to fit in the snapshot. + let bounds = ElectionBoundsBuilder::default().targets_size(10.into()).build(); + let elected = Staking::electable_targets(bounds.targets).unwrap(); + assert!(elected.encoded_size() == 9 as usize); + let prev_len = elected.len(); + + // larger size bounds means more space for targets. + let bounds = ElectionBoundsBuilder::default().targets_size(100.into()).build(); + let elected = Staking::electable_targets(bounds.targets).unwrap(); + assert!(elected.encoded_size() <= 100 as usize); + assert!(elected.len() > 1 && elected.len() > prev_len); + }); + } + + #[test] + fn nomination_quota_checks_at_nominate_works() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + // stash bond of 222 has a nomination quota of 2 targets. 
+ bond(61, 222); + assert_eq!(Staking::api_nominations_quota(222), 2); + + // nominating with targets below the nomination quota works. + assert_ok!(Staking::nominate(RuntimeOrigin::signed(61), vec![11])); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(61), vec![11, 12])); + + // nominating with targets above the nomination quota returns error. + assert_noop!( + Staking::nominate(RuntimeOrigin::signed(61), vec![11, 12, 13]), + Error::::TooManyTargets + ); + }); + } + + #[test] + fn lazy_quota_npos_voters_works_above_quota() { + ExtBuilder::default() + .nominate(false) + .add_staker( + 61, + 60, + 300, // 300 bond has 16 nomination quota. + StakerStatus::::Nominator(vec![21, 22, 23, 24, 25]), + ) + .build_and_execute(|| { + // unbond 78 from stash 60 so that it's bonded balance is 222, which has a lower + // nomination quota than at nomination time (max 2 targets). + assert_ok!(Staking::unbond(RuntimeOrigin::signed(61), 78)); + assert_eq!(Staking::api_nominations_quota(300 - 78), 2); + + // even through 61 has nomination quota of 2 at the time of the election, all the + // nominations (5) will be used. + assert_eq!( + Staking::electing_voters(DataProviderBounds::default()) + .unwrap() + .iter() + .map(|(stash, _, targets)| (*stash, targets.len())) + .collect::>(), + vec![(11, 1), (21, 1), (31, 1), (61, 5)], + ); + }); + } + + #[test] + fn nominations_quota_limits_size_work() { + ExtBuilder::default() + .nominate(false) + .add_staker( + 71, + 70, + 333, + StakerStatus::::Nominator(vec![16, 15, 14, 13, 12, 11, 10]), + ) + .build_and_execute(|| { + // nominations of controller 70 won't be added due to voter size limit exceeded. 
+ let bounds = ElectionBoundsBuilder::default().voters_size(100.into()).build(); + assert_eq!( + Staking::electing_voters(bounds.voters) + .unwrap() + .iter() + .map(|(stash, _, targets)| (*stash, targets.len())) + .collect::>(), + vec![(11, 1), (21, 1), (31, 1)], + ); + + assert_eq!( + *staking_events().last().unwrap(), + Event::SnapshotVotersSizeExceeded { size: 75 } + ); + + // however, if the election voter size bounds were largers, the snapshot would + // include the electing voters of 70. + let bounds = ElectionBoundsBuilder::default().voters_size(1_000.into()).build(); + assert_eq!( + Staking::electing_voters(bounds.voters) + .unwrap() + .iter() + .map(|(stash, _, targets)| (*stash, targets.len())) + .collect::>(), + vec![(11, 1), (21, 1), (31, 1), (71, 7)], + ); + }); + } + #[test] fn estimate_next_election_works() { ExtBuilder::default().session_per_era(5).period(5).build_and_execute(|| { @@ -5056,7 +5297,8 @@ fn min_commission_works() { } #[test] -fn change_of_max_nominations() { +#[should_panic] +fn change_of_absolute_max_nominations() { use frame_election_provider_support::ElectionDataProvider; ExtBuilder::default() .add_staker(61, 61, 10, StakerStatus::Nominator(vec![1])) @@ -5064,7 +5306,7 @@ fn change_of_max_nominations() { .balance_factor(10) .build_and_execute(|| { // pre-condition - assert_eq!(MaxNominations::get(), 16); + assert_eq!(AbsoluteMaxNominations::get(), 16); assert_eq!( Nominators::::iter() @@ -5072,11 +5314,15 @@ fn change_of_max_nominations() { .collect::>(), vec![(101, 2), (71, 3), (61, 1)] ); + + // default bounds are unbounded. + let bounds = DataProviderBounds::default(); + // 3 validators and 3 nominators - assert_eq!(Staking::electing_voters(None).unwrap().len(), 3 + 3); + assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 3); // abrupt change from 16 to 4, everyone should be fine. 
- MaxNominations::set(4); + AbsoluteMaxNominations::set(4); assert_eq!( Nominators::::iter() @@ -5084,10 +5330,10 @@ fn change_of_max_nominations() { .collect::>(), vec![(101, 2), (71, 3), (61, 1)] ); - assert_eq!(Staking::electing_voters(None).unwrap().len(), 3 + 3); + assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 3); // abrupt change from 4 to 3, everyone should be fine. - MaxNominations::set(3); + AbsoluteMaxNominations::set(3); assert_eq!( Nominators::::iter() @@ -5095,11 +5341,11 @@ fn change_of_max_nominations() { .collect::>(), vec![(101, 2), (71, 3), (61, 1)] ); - assert_eq!(Staking::electing_voters(None).unwrap().len(), 3 + 3); + assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 3); // abrupt change from 3 to 2, this should cause some nominators to be non-decodable, and // thus non-existent unless if they update. - MaxNominations::set(2); + AbsoluteMaxNominations::set(2); assert_eq!( Nominators::::iter() @@ -5112,12 +5358,12 @@ fn change_of_max_nominations() { // but its value cannot be decoded and default is returned. assert!(Nominators::::get(71).is_none()); - assert_eq!(Staking::electing_voters(None).unwrap().len(), 3 + 2); + assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 2); assert!(Nominators::::contains_key(101)); // abrupt change from 2 to 1, this should cause some nominators to be non-decodable, and // thus non-existent unless if they update. - MaxNominations::set(1); + AbsoluteMaxNominations::set(1); assert_eq!( Nominators::::iter() @@ -5129,7 +5375,7 @@ fn change_of_max_nominations() { assert!(Nominators::::contains_key(61)); assert!(Nominators::::get(71).is_none()); assert!(Nominators::::get(61).is_some()); - assert_eq!(Staking::electing_voters(None).unwrap().len(), 3 + 1); + assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 1); // now one of them can revive themselves by re-nominating to a proper value. 
assert_ok!(Staking::nominate(RuntimeOrigin::signed(71), vec![1])); @@ -5149,6 +5395,42 @@ fn change_of_max_nominations() { }) } +#[test] +fn nomination_quota_max_changes_decoding() { + use frame_election_provider_support::ElectionDataProvider; + ExtBuilder::default() + .add_staker(60, 61, 10, StakerStatus::Nominator(vec![1])) + .add_staker(70, 71, 10, StakerStatus::Nominator(vec![1, 2, 3])) + .add_staker(30, 330, 10, StakerStatus::Nominator(vec![1, 2, 3, 4])) + .add_staker(50, 550, 10, StakerStatus::Nominator(vec![1, 2, 3, 4])) + .balance_factor(10) + .build_and_execute(|| { + // pre-condition. + assert_eq!(MaxNominationsOf::::get(), 16); + + let unbonded_election = DataProviderBounds::default(); + + assert_eq!( + Nominators::::iter() + .map(|(k, n)| (k, n.targets.len())) + .collect::>(), + vec![(70, 3), (101, 2), (50, 4), (30, 4), (60, 1)] + ); + // 4 validators and 4 nominators + assert_eq!(Staking::electing_voters(unbonded_election).unwrap().len(), 4 + 4); + }); +} + +#[test] +fn api_nominations_quota_works() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Staking::api_nominations_quota(10), MaxNominationsOf::::get()); + assert_eq!(Staking::api_nominations_quota(333), MaxNominationsOf::::get()); + assert_eq!(Staking::api_nominations_quota(222), 2); + assert_eq!(Staking::api_nominations_quota(111), 1); + }) +} + mod sorted_list_provider { use super::*; use frame_election_provider_support::SortedListProvider; diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index 34b01445d965a..f2c65e677cac8 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_staking +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_staking. pub trait WeightInfo { @@ -99,8 +103,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1047` // Estimated: `4764` - // Minimum execution time: 54_907_000 picoseconds. - Weight::from_parts(55_685_000, 4764) + // Minimum execution time: 53_983_000 picoseconds. + Weight::from_parts(55_296_000, 4764) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -120,8 +124,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2028` // Estimated: `8877` - // Minimum execution time: 94_779_000 picoseconds. - Weight::from_parts(95_455_000, 8877) + // Minimum execution time: 96_590_000 picoseconds. 
+ Weight::from_parts(98_921_000, 8877) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -137,8 +141,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// Storage: Balances Freezes (r:1 w:0) /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: Staking Bonded (r:1 w:0) @@ -149,10 +151,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2233` // Estimated: `8877` - // Minimum execution time: 98_004_000 picoseconds. - Weight::from_parts(98_730_000, 8877) - .saturating_add(T::DbWeight::get().reads(13_u64)) - .saturating_add(T::DbWeight::get().writes(8_u64)) + // Minimum execution time: 99_901_000 picoseconds. + Weight::from_parts(102_919_000, 8877) + .saturating_add(T::DbWeight::get().reads(12_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: Staking Ledger (r:1 w:1) /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) @@ -162,19 +164,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// Storage: Balances Freezes (r:1 w:0) /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `s` is `[0, 100]`. 
fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1021` // Estimated: `4764` - // Minimum execution time: 45_888_000 picoseconds. - Weight::from_parts(47_568_327, 4764) - // Standard Error: 402 - .saturating_add(Weight::from_parts(7_520, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Minimum execution time: 45_230_000 picoseconds. + Weight::from_parts(47_052_829, 4764) + // Standard Error: 1_044 + .saturating_add(Weight::from_parts(43_887, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: Staking Ledger (r:1 w:1) /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) @@ -196,8 +196,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) /// Storage: VoterList CounterForListNodes (r:1 w:1) /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// Storage: Balances Freezes (r:1 w:0) @@ -211,12 +209,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2294 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 93_288_000 picoseconds. 
- Weight::from_parts(99_415_523, 6248) - // Standard Error: 3_291 - .saturating_add(Weight::from_parts(1_296_734, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(14_u64)) - .saturating_add(T::DbWeight::get().writes(12_u64)) + // Minimum execution time: 97_534_000 picoseconds. + Weight::from_parts(104_772_163, 6248) + // Standard Error: 3_674 + .saturating_add(Weight::from_parts(1_470_124, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } @@ -246,8 +244,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1414` // Estimated: `4556` - // Minimum execution time: 58_755_000 picoseconds. - Weight::from_parts(59_424_000, 4556) + // Minimum execution time: 57_467_000 picoseconds. + Weight::from_parts(59_437_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -258,12 +256,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1260 + k * (569 ±0)` + // Measured: `1285 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 29_399_000 picoseconds. - Weight::from_parts(30_443_621, 4556) - // Standard Error: 10_402 - .saturating_add(Weight::from_parts(7_890_220, 0).saturating_mul(k.into())) + // Minimum execution time: 32_857_000 picoseconds. 
+ Weight::from_parts(37_116_967, 4556) + // Standard Error: 9_522 + .saturating_add(Weight::from_parts(8_796_167, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -294,12 +292,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1888 + n * (105 ±0)` + // Measured: `1908 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 68_471_000 picoseconds. - Weight::from_parts(65_972_990, 6248) - // Standard Error: 13_983 - .saturating_add(Weight::from_parts(3_255_731, 0).saturating_mul(n.into())) + // Minimum execution time: 69_613_000 picoseconds. + Weight::from_parts(68_079_061, 6248) + // Standard Error: 18_554 + .saturating_add(Weight::from_parts(4_012_761, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -323,8 +321,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1748` // Estimated: `6248` - // Minimum execution time: 59_537_000 picoseconds. - Weight::from_parts(60_446_000, 6248) + // Minimum execution time: 60_430_000 picoseconds. + Weight::from_parts(62_702_000, 6248) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -336,8 +334,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `808` // Estimated: `4556` - // Minimum execution time: 15_403_000 picoseconds. - Weight::from_parts(15_676_000, 4556) + // Minimum execution time: 14_276_000 picoseconds. 
+ Weight::from_parts(14_766_000, 4556) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -349,8 +347,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `907` // Estimated: `8122` - // Minimum execution time: 23_316_000 picoseconds. - Weight::from_parts(23_670_000, 8122) + // Minimum execution time: 21_710_000 picoseconds. + Weight::from_parts(22_430_000, 8122) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -360,8 +358,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_558_000 picoseconds. - Weight::from_parts(3_759_000, 0) + // Minimum execution time: 2_970_000 picoseconds. + Weight::from_parts(3_120_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Staking ForceEra (r:0 w:1) @@ -370,8 +368,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_724_000 picoseconds. - Weight::from_parts(13_047_000, 0) + // Minimum execution time: 9_362_000 picoseconds. + Weight::from_parts(9_785_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Staking ForceEra (r:0 w:1) @@ -380,8 +378,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_734_000 picoseconds. - Weight::from_parts(13_218_000, 0) + // Minimum execution time: 9_275_000 picoseconds. + Weight::from_parts(9_678_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Staking ForceEra (r:0 w:1) @@ -390,8 +388,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_996_000 picoseconds. - Weight::from_parts(13_375_000, 0) + // Minimum execution time: 9_414_000 picoseconds. 
+ Weight::from_parts(9_848_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Staking Invulnerables (r:0 w:1) @@ -401,10 +399,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_920_000 picoseconds. - Weight::from_parts(4_619_469, 0) - // Standard Error: 22 - .saturating_add(Weight::from_parts(10_108, 0).saturating_mul(v.into())) + // Minimum execution time: 3_061_000 picoseconds. + Weight::from_parts(3_618_535, 0) + // Standard Error: 44 + .saturating_add(Weight::from_parts(10_774, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Staking Bonded (r:1 w:1) @@ -440,10 +438,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2018 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_516_000 picoseconds. - Weight::from_parts(92_324_464, 6248) - // Standard Error: 2_925 - .saturating_add(Weight::from_parts(1_286_284, 0).saturating_mul(s.into())) + // Minimum execution time: 87_914_000 picoseconds. + Weight::from_parts(95_688_129, 6248) + // Standard Error: 5_030 + .saturating_add(Weight::from_parts(1_487_249, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -456,10 +454,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `66639` // Estimated: `70104` - // Minimum execution time: 90_193_000 picoseconds. - Weight::from_parts(821_522_318, 70104) - // Standard Error: 57_922 - .saturating_add(Weight::from_parts(4_554_659, 0).saturating_mul(s.into())) + // Minimum execution time: 99_269_000 picoseconds. 
+ Weight::from_parts(1_154_264_637, 70104) + // Standard Error: 76_592 + .saturating_add(Weight::from_parts(6_490_888, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -486,10 +484,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `20217 + n * (143 ±0)` // Estimated: `19844 + n * (2603 ±1)` - // Minimum execution time: 80_329_000 picoseconds. - Weight::from_parts(97_340_643, 19844) - // Standard Error: 22_713 - .saturating_add(Weight::from_parts(29_087_425, 0).saturating_mul(n.into())) + // Minimum execution time: 91_767_000 picoseconds. + Weight::from_parts(146_781_264, 19844) + // Standard Error: 31_341 + .saturating_add(Weight::from_parts(30_553_008, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -521,12 +519,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `34971 + n * (401 ±0)` - // Estimated: `32376 + n * (3774 ±0)` - // Minimum execution time: 105_591_000 picoseconds. - Weight::from_parts(111_587_915, 32376) - // Standard Error: 15_598 - .saturating_add(Weight::from_parts(48_948_195, 0).saturating_mul(n.into())) + // Measured: `33190 + n * (377 ±0)` + // Estimated: `30845 + n * (3774 ±0)` + // Minimum execution time: 121_303_000 picoseconds. 
+ Weight::from_parts(151_046_907, 30845) + // Standard Error: 41_899 + .saturating_add(Weight::from_parts(49_837_804, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -539,8 +537,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// Storage: Balances Freezes (r:1 w:0) /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: Staking Bonded (r:1 w:0) @@ -552,15 +548,13 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2029 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 89_420_000 picoseconds. - Weight::from_parts(90_743_615, 8877) - // Standard Error: 1_260 - .saturating_add(Weight::from_parts(50_832, 0).saturating_mul(l.into())) - .saturating_add(T::DbWeight::get().reads(10_u64)) - .saturating_add(T::DbWeight::get().writes(8_u64)) + // Minimum execution time: 90_068_000 picoseconds. 
+ Weight::from_parts(93_137_456, 8877) + // Standard Error: 4_799 + .saturating_add(Weight::from_parts(54_421, 0).saturating_mul(l.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Staking Bonded (r:1 w:1) /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) /// Storage: Staking Ledger (r:1 w:1) @@ -592,12 +586,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2294 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 100_911_000 picoseconds. - Weight::from_parts(102_678_006, 6248) - // Standard Error: 2_349 - .saturating_add(Weight::from_parts(1_262_431, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(13_u64)) - .saturating_add(T::DbWeight::get().writes(12_u64)) + // Minimum execution time: 103_139_000 picoseconds. + Weight::from_parts(107_036_296, 6248) + // Standard Error: 3_935 + .saturating_add(Weight::from_parts(1_465_860, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) + .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } @@ -641,12 +635,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 554_712_000 picoseconds. 
- Weight::from_parts(556_603_000, 512390) - // Standard Error: 1_925_251 - .saturating_add(Weight::from_parts(62_627_196, 0).saturating_mul(v.into())) - // Standard Error: 191_840 - .saturating_add(Weight::from_parts(16_681_790, 0).saturating_mul(n.into())) + // Minimum execution time: 587_156_000 picoseconds. + Weight::from_parts(590_176_000, 512390) + // Standard Error: 2_008_420 + .saturating_add(Weight::from_parts(64_526_052, 0).saturating_mul(v.into())) + // Standard Error: 200_128 + .saturating_add(Weight::from_parts(18_070_222, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(206_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -675,14 +669,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[500, 1000]`. fn get_npos_voters(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `3135 + n * (911 ±0) + v * (395 ±0)` + // Measured: `3217 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 31_770_670_000 picoseconds. - Weight::from_parts(31_839_042_000, 512390) - // Standard Error: 355_382 - .saturating_add(Weight::from_parts(5_044_540, 0).saturating_mul(v.into())) - // Standard Error: 355_382 - .saturating_add(Weight::from_parts(3_205_722, 0).saturating_mul(n.into())) + // Minimum execution time: 34_399_721_000 picoseconds. 
+ Weight::from_parts(34_605_803_000, 512390) + // Standard Error: 380_106 + .saturating_add(Weight::from_parts(5_426_220, 0).saturating_mul(v.into())) + // Standard Error: 380_106 + .saturating_add(Weight::from_parts(3_318_197, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(201_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -699,10 +693,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `983 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_253_567_000 picoseconds. - Weight::from_parts(61_440_613, 3510) - // Standard Error: 5_276 - .saturating_add(Weight::from_parts(4_414_153, 0).saturating_mul(v.into())) + // Minimum execution time: 2_392_849_000 picoseconds. + Weight::from_parts(64_373_879, 3510) + // Standard Error: 8_995 + .saturating_add(Weight::from_parts(4_721_536, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -723,8 +717,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_292_000 picoseconds. - Weight::from_parts(9_587_000, 0) + // Minimum execution time: 7_529_000 picoseconds. + Weight::from_parts(7_970_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: Staking MinCommission (r:0 w:1) @@ -743,8 +737,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_294_000 picoseconds. - Weight::from_parts(8_597_000, 0) + // Minimum execution time: 7_011_000 picoseconds. 
+ Weight::from_parts(7_317_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: Staking Ledger (r:1 w:0) @@ -771,8 +765,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1871` // Estimated: `6248` - // Minimum execution time: 75_742_000 picoseconds. - Weight::from_parts(76_252_000, 6248) + // Minimum execution time: 75_982_000 picoseconds. + Weight::from_parts(77_412_000, 6248) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -784,8 +778,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `694` // Estimated: `3510` - // Minimum execution time: 16_407_000 picoseconds. - Weight::from_parts(16_726_000, 3510) + // Minimum execution time: 13_923_000 picoseconds. + Weight::from_parts(14_356_000, 3510) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -795,8 +789,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_977_000 picoseconds. - Weight::from_parts(5_224_000, 0) + // Minimum execution time: 3_415_000 picoseconds. + Weight::from_parts(3_679_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -819,8 +813,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1047` // Estimated: `4764` - // Minimum execution time: 54_907_000 picoseconds. - Weight::from_parts(55_685_000, 4764) + // Minimum execution time: 53_983_000 picoseconds. + Weight::from_parts(55_296_000, 4764) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -840,8 +834,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2028` // Estimated: `8877` - // Minimum execution time: 94_779_000 picoseconds. - Weight::from_parts(95_455_000, 8877) + // Minimum execution time: 96_590_000 picoseconds. 
+ Weight::from_parts(98_921_000, 8877) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -857,8 +851,6 @@ impl WeightInfo for () { /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// Storage: Balances Freezes (r:1 w:0) /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: Staking Bonded (r:1 w:0) @@ -869,10 +861,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2233` // Estimated: `8877` - // Minimum execution time: 98_004_000 picoseconds. - Weight::from_parts(98_730_000, 8877) - .saturating_add(RocksDbWeight::get().reads(13_u64)) - .saturating_add(RocksDbWeight::get().writes(8_u64)) + // Minimum execution time: 99_901_000 picoseconds. + Weight::from_parts(102_919_000, 8877) + .saturating_add(RocksDbWeight::get().reads(12_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: Staking Ledger (r:1 w:1) /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) @@ -882,19 +874,17 @@ impl WeightInfo for () { /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// Storage: Balances Freezes (r:1 w:0) /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `s` is `[0, 100]`. 
fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1021` // Estimated: `4764` - // Minimum execution time: 45_888_000 picoseconds. - Weight::from_parts(47_568_327, 4764) - // Standard Error: 402 - .saturating_add(Weight::from_parts(7_520, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Minimum execution time: 45_230_000 picoseconds. + Weight::from_parts(47_052_829, 4764) + // Standard Error: 1_044 + .saturating_add(Weight::from_parts(43_887, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: Staking Ledger (r:1 w:1) /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) @@ -916,8 +906,6 @@ impl WeightInfo for () { /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) /// Storage: VoterList CounterForListNodes (r:1 w:1) /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Balances Locks (r:1 w:1) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// Storage: Balances Freezes (r:1 w:0) @@ -931,12 +919,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2294 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 93_288_000 picoseconds. 
- Weight::from_parts(99_415_523, 6248) - // Standard Error: 3_291 - .saturating_add(Weight::from_parts(1_296_734, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(14_u64)) - .saturating_add(RocksDbWeight::get().writes(12_u64)) + // Minimum execution time: 97_534_000 picoseconds. + Weight::from_parts(104_772_163, 6248) + // Standard Error: 3_674 + .saturating_add(Weight::from_parts(1_470_124, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) + .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } @@ -966,8 +954,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1414` // Estimated: `4556` - // Minimum execution time: 58_755_000 picoseconds. - Weight::from_parts(59_424_000, 4556) + // Minimum execution time: 57_467_000 picoseconds. + Weight::from_parts(59_437_000, 4556) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -978,12 +966,12 @@ impl WeightInfo for () { /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1260 + k * (569 ±0)` + // Measured: `1285 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 29_399_000 picoseconds. - Weight::from_parts(30_443_621, 4556) - // Standard Error: 10_402 - .saturating_add(Weight::from_parts(7_890_220, 0).saturating_mul(k.into())) + // Minimum execution time: 32_857_000 picoseconds. 
+ Weight::from_parts(37_116_967, 4556) + // Standard Error: 9_522 + .saturating_add(Weight::from_parts(8_796_167, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -1014,12 +1002,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1888 + n * (105 ±0)` + // Measured: `1908 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 68_471_000 picoseconds. - Weight::from_parts(65_972_990, 6248) - // Standard Error: 13_983 - .saturating_add(Weight::from_parts(3_255_731, 0).saturating_mul(n.into())) + // Minimum execution time: 69_613_000 picoseconds. + Weight::from_parts(68_079_061, 6248) + // Standard Error: 18_554 + .saturating_add(Weight::from_parts(4_012_761, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1043,8 +1031,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1748` // Estimated: `6248` - // Minimum execution time: 59_537_000 picoseconds. - Weight::from_parts(60_446_000, 6248) + // Minimum execution time: 60_430_000 picoseconds. + Weight::from_parts(62_702_000, 6248) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1056,8 +1044,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `808` // Estimated: `4556` - // Minimum execution time: 15_403_000 picoseconds. - Weight::from_parts(15_676_000, 4556) + // Minimum execution time: 14_276_000 picoseconds. 
+ Weight::from_parts(14_766_000, 4556) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1069,8 +1057,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `907` // Estimated: `8122` - // Minimum execution time: 23_316_000 picoseconds. - Weight::from_parts(23_670_000, 8122) + // Minimum execution time: 21_710_000 picoseconds. + Weight::from_parts(22_430_000, 8122) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1080,8 +1068,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_558_000 picoseconds. - Weight::from_parts(3_759_000, 0) + // Minimum execution time: 2_970_000 picoseconds. + Weight::from_parts(3_120_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Staking ForceEra (r:0 w:1) @@ -1090,8 +1078,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_724_000 picoseconds. - Weight::from_parts(13_047_000, 0) + // Minimum execution time: 9_362_000 picoseconds. + Weight::from_parts(9_785_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Staking ForceEra (r:0 w:1) @@ -1100,8 +1088,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_734_000 picoseconds. - Weight::from_parts(13_218_000, 0) + // Minimum execution time: 9_275_000 picoseconds. + Weight::from_parts(9_678_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Staking ForceEra (r:0 w:1) @@ -1110,8 +1098,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_996_000 picoseconds. - Weight::from_parts(13_375_000, 0) + // Minimum execution time: 9_414_000 picoseconds. 
+ Weight::from_parts(9_848_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Staking Invulnerables (r:0 w:1) @@ -1121,10 +1109,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_920_000 picoseconds. - Weight::from_parts(4_619_469, 0) - // Standard Error: 22 - .saturating_add(Weight::from_parts(10_108, 0).saturating_mul(v.into())) + // Minimum execution time: 3_061_000 picoseconds. + Weight::from_parts(3_618_535, 0) + // Standard Error: 44 + .saturating_add(Weight::from_parts(10_774, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Staking Bonded (r:1 w:1) @@ -1160,10 +1148,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2018 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_516_000 picoseconds. - Weight::from_parts(92_324_464, 6248) - // Standard Error: 2_925 - .saturating_add(Weight::from_parts(1_286_284, 0).saturating_mul(s.into())) + // Minimum execution time: 87_914_000 picoseconds. + Weight::from_parts(95_688_129, 6248) + // Standard Error: 5_030 + .saturating_add(Weight::from_parts(1_487_249, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1176,10 +1164,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `66639` // Estimated: `70104` - // Minimum execution time: 90_193_000 picoseconds. - Weight::from_parts(821_522_318, 70104) - // Standard Error: 57_922 - .saturating_add(Weight::from_parts(4_554_659, 0).saturating_mul(s.into())) + // Minimum execution time: 99_269_000 picoseconds. 
+ Weight::from_parts(1_154_264_637, 70104) + // Standard Error: 76_592 + .saturating_add(Weight::from_parts(6_490_888, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1206,10 +1194,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `20217 + n * (143 ±0)` // Estimated: `19844 + n * (2603 ±1)` - // Minimum execution time: 80_329_000 picoseconds. - Weight::from_parts(97_340_643, 19844) - // Standard Error: 22_713 - .saturating_add(Weight::from_parts(29_087_425, 0).saturating_mul(n.into())) + // Minimum execution time: 91_767_000 picoseconds. + Weight::from_parts(146_781_264, 19844) + // Standard Error: 31_341 + .saturating_add(Weight::from_parts(30_553_008, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1241,12 +1229,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `34971 + n * (401 ±0)` - // Estimated: `32376 + n * (3774 ±0)` - // Minimum execution time: 105_591_000 picoseconds. - Weight::from_parts(111_587_915, 32376) - // Standard Error: 15_598 - .saturating_add(Weight::from_parts(48_948_195, 0).saturating_mul(n.into())) + // Measured: `33190 + n * (377 ±0)` + // Estimated: `30845 + n * (3774 ±0)` + // Minimum execution time: 121_303_000 picoseconds. 
+ Weight::from_parts(151_046_907, 30845) + // Standard Error: 41_899 + .saturating_add(Weight::from_parts(49_837_804, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -1259,8 +1247,6 @@ impl WeightInfo for () { /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// Storage: Balances Freezes (r:1 w:0) /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: VoterList ListNodes (r:3 w:3) /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) /// Storage: Staking Bonded (r:1 w:0) @@ -1272,15 +1258,13 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2029 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 89_420_000 picoseconds. - Weight::from_parts(90_743_615, 8877) - // Standard Error: 1_260 - .saturating_add(Weight::from_parts(50_832, 0).saturating_mul(l.into())) - .saturating_add(RocksDbWeight::get().reads(10_u64)) - .saturating_add(RocksDbWeight::get().writes(8_u64)) + // Minimum execution time: 90_068_000 picoseconds. 
+ Weight::from_parts(93_137_456, 8877) + // Standard Error: 4_799 + .saturating_add(Weight::from_parts(54_421, 0).saturating_mul(l.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// Storage: Staking Bonded (r:1 w:1) /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) /// Storage: Staking Ledger (r:1 w:1) @@ -1312,12 +1296,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2294 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 100_911_000 picoseconds. - Weight::from_parts(102_678_006, 6248) - // Standard Error: 2_349 - .saturating_add(Weight::from_parts(1_262_431, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(13_u64)) - .saturating_add(RocksDbWeight::get().writes(12_u64)) + // Minimum execution time: 103_139_000 picoseconds. + Weight::from_parts(107_036_296, 6248) + // Standard Error: 3_935 + .saturating_add(Weight::from_parts(1_465_860, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) + .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } @@ -1361,12 +1345,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 554_712_000 picoseconds. 
- Weight::from_parts(556_603_000, 512390) - // Standard Error: 1_925_251 - .saturating_add(Weight::from_parts(62_627_196, 0).saturating_mul(v.into())) - // Standard Error: 191_840 - .saturating_add(Weight::from_parts(16_681_790, 0).saturating_mul(n.into())) + // Minimum execution time: 587_156_000 picoseconds. + Weight::from_parts(590_176_000, 512390) + // Standard Error: 2_008_420 + .saturating_add(Weight::from_parts(64_526_052, 0).saturating_mul(v.into())) + // Standard Error: 200_128 + .saturating_add(Weight::from_parts(18_070_222, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(206_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1395,14 +1379,14 @@ impl WeightInfo for () { /// The range of component `n` is `[500, 1000]`. fn get_npos_voters(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `3135 + n * (911 ±0) + v * (395 ±0)` + // Measured: `3217 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 31_770_670_000 picoseconds. - Weight::from_parts(31_839_042_000, 512390) - // Standard Error: 355_382 - .saturating_add(Weight::from_parts(5_044_540, 0).saturating_mul(v.into())) - // Standard Error: 355_382 - .saturating_add(Weight::from_parts(3_205_722, 0).saturating_mul(n.into())) + // Minimum execution time: 34_399_721_000 picoseconds. 
+ Weight::from_parts(34_605_803_000, 512390) + // Standard Error: 380_106 + .saturating_add(Weight::from_parts(5_426_220, 0).saturating_mul(v.into())) + // Standard Error: 380_106 + .saturating_add(Weight::from_parts(3_318_197, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(201_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1419,10 +1403,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `983 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_253_567_000 picoseconds. - Weight::from_parts(61_440_613, 3510) - // Standard Error: 5_276 - .saturating_add(Weight::from_parts(4_414_153, 0).saturating_mul(v.into())) + // Minimum execution time: 2_392_849_000 picoseconds. + Weight::from_parts(64_373_879, 3510) + // Standard Error: 8_995 + .saturating_add(Weight::from_parts(4_721_536, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -1443,8 +1427,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_292_000 picoseconds. - Weight::from_parts(9_587_000, 0) + // Minimum execution time: 7_529_000 picoseconds. + Weight::from_parts(7_970_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: Staking MinCommission (r:0 w:1) @@ -1463,8 +1447,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_294_000 picoseconds. - Weight::from_parts(8_597_000, 0) + // Minimum execution time: 7_011_000 picoseconds. 
+ Weight::from_parts(7_317_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: Staking Ledger (r:1 w:0) @@ -1491,8 +1475,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1871` // Estimated: `6248` - // Minimum execution time: 75_742_000 picoseconds. - Weight::from_parts(76_252_000, 6248) + // Minimum execution time: 75_982_000 picoseconds. + Weight::from_parts(77_412_000, 6248) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1504,8 +1488,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `694` // Estimated: `3510` - // Minimum execution time: 16_407_000 picoseconds. - Weight::from_parts(16_726_000, 3510) + // Minimum execution time: 13_923_000 picoseconds. + Weight::from_parts(14_356_000, 3510) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1515,8 +1499,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_977_000 picoseconds. - Weight::from_parts(5_224_000, 0) + // Minimum execution time: 3_415_000 picoseconds. 
+ Weight::from_parts(3_679_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index 9cd875932ceaf..dd2693f10a851 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -12,10 +12,10 @@ description = "FRAME pallet migration of trie" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.133", optional = true } +serde = { version = "1.0.163", optional = true } thousands = { version = "0.2.0", optional = true } zstd = { version = "0.12.3", default-features = false, optional = true } frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" } @@ -47,7 +47,21 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "sp-tracing/std" +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -runtime-benchmarks = ["frame-benchmarking"] -try-runtime = ["frame-support/try-runtime"] remote-test = [ "remote-externalities", "serde", "std", "substrate-state-trie-migration-rpc", "thousands", "zstd" ] diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 1f6266d999825..c5b41cd9a4439 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ 
-1062,20 +1062,16 @@ mod mock { }; use sp_runtime::{ traits::{BlakeTwo256, Header as _, IdentityLookup}, - StorageChild, + BuildStorage, StorageChild, }; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - type Block = frame_system::mocking::MockBlock; + type Block = frame_system::mocking::MockBlockU32; // Configure a mock runtime to test the pallet. frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Config, Storage, Event}, StateTrieMigration: pallet_state_trie_migration::{Pallet, Call, Storage, Event}, } @@ -1091,13 +1087,12 @@ mod mock { type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = sp_runtime::generic::Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU32<250>; type DbWeight = (); @@ -1130,7 +1125,7 @@ mod mock { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -1241,8 +1236,8 @@ mod mock { }; if with_pallets { - frame_system::GenesisConfig::default() - .assimilate_storage::(&mut custom_storage) + frame_system::GenesisConfig::::default() + .assimilate_storage(&mut custom_storage) .unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 1000)] } .assimilate_storage(&mut custom_storage) @@ -1619,18 +1614,18 @@ pub(crate) mod remote_tests { traits::{Get, Hooks}, weights::Weight, }; - use frame_system::Pallet as System; + use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System}; use 
remote_externalities::Mode; use sp_core::H256; use sp_runtime::{ - traits::{Block as BlockT, HashFor, Header as _, One, Zero}, + traits::{Block as BlockT, HashingFor, Header as _, One, Zero}, DeserializeOwned, }; use thousands::Separable; #[allow(dead_code)] fn run_to_block>( - n: ::BlockNumber, + n: BlockNumberFor, ) -> (H256, Weight) { let mut root = Default::default(); let mut weight_sum = Weight::zero(); @@ -1670,7 +1665,7 @@ pub(crate) mod remote_tests { frame_system::Pallet::::block_number() }); - let mut duration: ::BlockNumber = Zero::zero(); + let mut duration: BlockNumberFor = Zero::zero(); // set the version to 1, as if the upgrade happened. ext.state_version = sp_core::storage::StateVersion::V1; @@ -1703,7 +1698,7 @@ pub(crate) mod remote_tests { }); let compact_proof = - proof.clone().into_compact_proof::>(last_state_root).unwrap(); + proof.clone().into_compact_proof::>(last_state_root).unwrap(); log::info!( target: LOG_TARGET, "proceeded to #{}, weight: [{} / {}], proof: [{} / {} / {}]", diff --git a/frame/state-trie-migration/src/weights.rs b/frame/state-trie-migration/src/weights.rs index 8565dd73e0c46..df3338fdc17d3 100644 --- a/frame/state-trie-migration/src/weights.rs +++ b/frame/state-trie-migration/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_state_trie_migration //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_state_trie_migration +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_state_trie_migration. pub trait WeightInfo { @@ -68,8 +72,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `108` // Estimated: `2527` - // Minimum execution time: 17_385_000 picoseconds. - Weight::from_parts(17_766_000, 2527) + // Minimum execution time: 14_297_000 picoseconds. + Weight::from_parts(14_832_000, 2527) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -79,16 +83,16 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1493` - // Minimum execution time: 4_537_000 picoseconds. - Weight::from_parts(4_734_000, 1493) + // Minimum execution time: 4_237_000 picoseconds. + Weight::from_parts(4_646_000, 1493) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn migrate_custom_top_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_127_000 picoseconds. - Weight::from_parts(10_384_000, 0) + // Minimum execution time: 8_898_000 picoseconds. 
+ Weight::from_parts(9_237_000, 0) } /// Storage: unknown `0x666f6f` (r:1 w:1) /// Proof Skipped: unknown `0x666f6f` (r:1 w:1) @@ -96,8 +100,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `113` // Estimated: `3578` - // Minimum execution time: 31_113_000 picoseconds. - Weight::from_parts(31_833_000, 3578) + // Minimum execution time: 29_291_000 picoseconds. + Weight::from_parts(30_424_000, 3578) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -105,8 +109,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_445_000 picoseconds. - Weight::from_parts(10_726_000, 0) + // Minimum execution time: 9_094_000 picoseconds. + Weight::from_parts(9_544_000, 0) } /// Storage: unknown `0x666f6f` (r:1 w:1) /// Proof Skipped: unknown `0x666f6f` (r:1 w:1) @@ -114,8 +118,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `105` // Estimated: `3570` - // Minimum execution time: 31_795_000 picoseconds. - Weight::from_parts(32_737_000, 3570) + // Minimum execution time: 30_286_000 picoseconds. + Weight::from_parts(30_948_000, 3570) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -126,10 +130,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `197 + v * (1 ±0)` // Estimated: `3662 + v * (1 ±0)` - // Minimum execution time: 5_933_000 picoseconds. - Weight::from_parts(6_040_000, 3662) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_336, 0).saturating_mul(v.into())) + // Minimum execution time: 5_420_000 picoseconds. 
+ Weight::from_parts(5_560_000, 3662) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_139, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(v.into())) @@ -146,8 +150,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `108` // Estimated: `2527` - // Minimum execution time: 17_385_000 picoseconds. - Weight::from_parts(17_766_000, 2527) + // Minimum execution time: 14_297_000 picoseconds. + Weight::from_parts(14_832_000, 2527) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -157,16 +161,16 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1493` - // Minimum execution time: 4_537_000 picoseconds. - Weight::from_parts(4_734_000, 1493) + // Minimum execution time: 4_237_000 picoseconds. + Weight::from_parts(4_646_000, 1493) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn migrate_custom_top_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_127_000 picoseconds. - Weight::from_parts(10_384_000, 0) + // Minimum execution time: 8_898_000 picoseconds. + Weight::from_parts(9_237_000, 0) } /// Storage: unknown `0x666f6f` (r:1 w:1) /// Proof Skipped: unknown `0x666f6f` (r:1 w:1) @@ -174,8 +178,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `113` // Estimated: `3578` - // Minimum execution time: 31_113_000 picoseconds. - Weight::from_parts(31_833_000, 3578) + // Minimum execution time: 29_291_000 picoseconds. 
+ Weight::from_parts(30_424_000, 3578) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -183,8 +187,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_445_000 picoseconds. - Weight::from_parts(10_726_000, 0) + // Minimum execution time: 9_094_000 picoseconds. + Weight::from_parts(9_544_000, 0) } /// Storage: unknown `0x666f6f` (r:1 w:1) /// Proof Skipped: unknown `0x666f6f` (r:1 w:1) @@ -192,8 +196,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `105` // Estimated: `3570` - // Minimum execution time: 31_795_000 picoseconds. - Weight::from_parts(32_737_000, 3570) + // Minimum execution time: 30_286_000 picoseconds. + Weight::from_parts(30_948_000, 3570) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -204,10 +208,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `197 + v * (1 ±0)` // Estimated: `3662 + v * (1 ±0)` - // Minimum execution time: 5_933_000 picoseconds. - Weight::from_parts(6_040_000, 3662) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_336, 0).saturating_mul(v.into())) + // Minimum execution time: 5_420_000 picoseconds. 
+ Weight::from_parts(5_560_000, 3662) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_139, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(v.into())) diff --git a/frame/statement/Cargo.toml b/frame/statement/Cargo.toml index 8f9a6269573ec..cc51258165c90 100644 --- a/frame/statement/Cargo.toml +++ b/frame/statement/Cargo.toml @@ -12,16 +12,16 @@ description = "FRAME pallet for statement store" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"]} +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"]} scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-statement-store = { version = "4.0.0-dev", default-features = false, path = "../../primitives/statement-store" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "21.0.0", default-features = false, path = 
"../../primitives/core" } log = { version = "0.4.17", default-features = false } [dev-dependencies] @@ -40,7 +40,11 @@ std = [ "sp-io/std", "sp-core/std", "sp-statement-store/std", + "pallet-balances/std" ] try-runtime = [ "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] diff --git a/frame/statement/src/mock.rs b/frame/statement/src/mock.rs index f4d9360c9a6c0..79d2aa7d891d5 100644 --- a/frame/statement/src/mock.rs +++ b/frame/statement/src/mock.rs @@ -27,12 +27,10 @@ use frame_support::{ }; use sp_core::{Pair, H256}; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, - AccountId32, + AccountId32, BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; pub const MIN_ALLOWED_STATEMENTS: u32 = 4; @@ -41,10 +39,7 @@ pub const MIN_ALLOWED_BYTES: u32 = 1024; pub const MAX_ALLOWED_BYTES: u32 = 4096; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, Balances: pallet_balances, @@ -59,13 +54,12 @@ impl frame_system::Config for Test { type DbWeight = RocksDbWeight; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId32; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -91,7 +85,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = RuntimeHoldReason; type MaxHolds = (); } @@ -111,7 +105,7 @@ impl Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t 
= frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let balances = pallet_balances::GenesisConfig:: { balances: vec![ (sp_core::sr25519::Pair::from_string("//Alice", None).unwrap().public().into(), 6000), diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index c50b3da262ab1..9f3851d91ef54 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } [features] default = ["std"] @@ -36,10 +36,16 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "sp-core/std" ] runtime-benchmarks = [ 
"frame-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "frame-support/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 50eb83810d55e..f735469558c70 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -59,7 +59,7 @@ //! use frame_system::pallet_prelude::*; //! //! #[pallet::pallet] -//! pub struct Pallet(PhantomData); +//! pub struct Pallet(_); //! //! #[pallet::config] //! pub trait Config: frame_system::Config {} @@ -141,7 +141,7 @@ pub mod pallet { } #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::call] impl Pallet { @@ -289,7 +289,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { if let Some(ref key) = self.key { Key::::put(key); diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 95a6507c12dd5..9e78e474f4e5a 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -19,12 +19,12 @@ use super::*; use crate as sudo; -use frame_support::traits::{ConstU32, ConstU64, Contains, GenesisBuild}; +use frame_support::traits::{ConstU32, ConstU64, Contains}; use sp_core::H256; use sp_io; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; // Logger module to track execution. 
@@ -39,7 +39,7 @@ pub mod logger { } #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::call] impl Pallet { @@ -90,16 +90,12 @@ pub mod logger { pub(super) type I32Log = StorageValue<_, BoundedVec>, ValueQuery>; } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Sudo: sudo::{Pallet, Call, Config, Storage, Event}, Logger: logger::{Pallet, Call, Storage, Event}, } @@ -119,13 +115,12 @@ impl frame_system::Config for Test { type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -157,7 +152,7 @@ pub type LoggerCall = logger::Call; // Build test environment by setting the root `key` for the Genesis. 
pub fn new_test_ext(root_key: u64) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); sudo::GenesisConfig:: { key: Some(root_key) } .assimilate_storage(&mut t) .unwrap(); @@ -166,5 +161,5 @@ pub fn new_test_ext(root_key: u64) -> sp_io::TestExternalities { #[cfg(feature = "runtime-benchmarks")] pub fn new_bench_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default().build_storage::().unwrap().into() + frame_system::GenesisConfig::::default().build_storage().unwrap().into() } diff --git a/frame/sudo/src/weights.rs b/frame/sudo/src/weights.rs index bc81cd607ac6d..6a0197d1469b4 100644 --- a/frame/sudo/src/weights.rs +++ b/frame/sudo/src/weights.rs @@ -18,26 +18,28 @@ //! Autogenerated weights for pallet_sudo //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_sudo +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_sudo -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/sudo/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -64,8 +66,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 13_962_000 picoseconds. - Weight::from_parts(14_283_000, 1517) + // Minimum execution time: 12_918_000 picoseconds. + Weight::from_parts(13_403_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -75,8 +77,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 14_009_000 picoseconds. - Weight::from_parts(14_400_000, 1517) + // Minimum execution time: 12_693_000 picoseconds. + Weight::from_parts(13_001_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Sudo Key (r:1 w:0) @@ -85,8 +87,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 13_954_000 picoseconds. - Weight::from_parts(14_248_000, 1517) + // Minimum execution time: 12_590_000 picoseconds. 
+ Weight::from_parts(12_994_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) } } @@ -99,8 +101,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 13_962_000 picoseconds. - Weight::from_parts(14_283_000, 1517) + // Minimum execution time: 12_918_000 picoseconds. + Weight::from_parts(13_403_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -110,8 +112,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 14_009_000 picoseconds. - Weight::from_parts(14_400_000, 1517) + // Minimum execution time: 12_693_000 picoseconds. + Weight::from_parts(13_001_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Sudo Key (r:1 w:0) @@ -120,8 +122,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 13_954_000 picoseconds. - Weight::from_parts(14_248_000, 1517) + // Minimum execution time: 12_590_000 picoseconds. 
+ Weight::from_parts(12_994_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) } } diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 6ed67c8081b5b..da6ee19e04a0d 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -13,47 +13,51 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.136", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } +serde = { version = "1.0.163", default-features = false, features = ["alloc", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -frame-metadata = { version = "15.1.0", default-features = false, features = ["v14", "v15-unstable"] } -sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-tracing = { version = "6.0.0", default-features = false, path = "../../primitives/tracing" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../primitives/arithmetic" } +frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api", features = [ "frame-metadata" ] } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = 
"../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-tracing = { version = "10.0.0", default-features = false, path = "../../primitives/tracing" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../../primitives/arithmetic" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } -sp-weights = { version = "4.0.0", default-features = false, path = "../../primitives/weights" } +sp-weights = { version = "20.0.0", default-features = false, path = "../../primitives/weights" } sp-debug-derive = { default-features = false, path = "../../primitives/debug-derive" } tt-call = "1.0.8" +macro_magic = "0.4.2" frame-support-procedural = { version = "4.0.0-dev", default-features = false, path = "./procedural" } paste = "1.0" -once_cell = { version = "1", default-features = false, optional = true } -sp-state-machine = { version = "0.13.0", default-features = false, optional = true, path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.28.0", default-features = false, optional = true, path = "../../primitives/state-machine" } bitflags = "1.3" impl-trait-for-tuples = "0.2.2" -smallvec = "1.8.0" +smallvec = "1.11.0" log = { version = "0.4.17", default-features = false } -sp-core-hashing-proc-macro = { version = "5.0.0", path = "../../primitives/core/hashing/proc-macro" } -k256 = { version = "0.13.0", default-features = false, features = ["ecdsa"] } +sp-core-hashing-proc-macro = { version = "9.0.0", path = "../../primitives/core/hashing/proc-macro" } +k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } environmental = { version = "1.1.4", default-features = false } +sp-genesis-builder = { version = 
"0.1.0", default-features=false, path = "../../primitives/genesis-builder" } +serde_json = { version = "1.0.85", default-features = false, features = ["alloc"] } +docify = "0.2.1" + +aquamarine = { version = "0.3.2" } [dev-dependencies] -serde_json = "1.0.85" assert_matches = "1.3.0" pretty_assertions = "1.2.1" frame-system = { version = "4.0.0-dev", path = "../system" } +array-bytes = "6.1" [features] default = ["std"] std = [ "sp-core/std", "k256/std", - "once_cell", - "serde", + "serde/std", "sp-api/std", "sp-io/std", "codec/std", @@ -70,6 +74,9 @@ std = [ "frame-support-procedural/std", "log/std", "environmental/std", + "sp-genesis-builder/std", + "frame-system/std", + "sp-debug-derive/std" ] runtime-benchmarks = [ "frame-system/runtime-benchmarks", @@ -77,8 +84,11 @@ runtime-benchmarks = [ "sp-staking/runtime-benchmarks" ] try-runtime = [ - "sp-debug-derive/force-debug" + "sp-debug-derive/force-debug", + "frame-system/try-runtime", + "sp-runtime/try-runtime" ] +experimental = [] # By default some types have documentation, `no-metadata-docs` allows to reduce the documentation # in the metadata. 
no-metadata-docs = ["frame-support-procedural/no-metadata-docs", "sp-api/no-metadata-docs"] diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index 8e18376ba3288..1d201d5a87fc5 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -20,10 +20,12 @@ Inflector = "0.11.4" cfg-expr = "0.15.1" itertools = "0.10.3" proc-macro2 = "1.0.56" -quote = "1.0.26" -syn = { version = "2.0.14", features = ["full"] } +quote = "1.0.28" +syn = { version = "2.0.16", features = ["full"] } frame-support-procedural-tools = { version = "4.0.0-dev", path = "./tools" } -proc-macro-warning = { version = "0.3.0", default-features = false } +proc-macro-warning = { version = "0.4.1", default-features = false } +macro_magic = { version = "0.4.2", features = ["proc_support"] } +expander = "2.0.0" [features] default = ["std"] diff --git a/frame/support/procedural/src/benchmark.rs b/frame/support/procedural/src/benchmark.rs index 28b5aa1b983b7..9f28e7129d2e1 100644 --- a/frame/support/procedural/src/benchmark.rs +++ b/frame/support/procedural/src/benchmark.rs @@ -166,11 +166,11 @@ fn ensure_valid_return_type(item_fn: &ItemFn) -> Result<()> { if let ReturnType::Type(_, typ) = &item_fn.sig.output { let non_unit = |span| return Err(Error::new(span, "expected `()`")); let Type::Path(TypePath { path, qself: _ }) = &**typ else { - return Err(Error::new( + return Err(Error::new( typ.span(), "Only `Result<(), BenchmarkError>` or a blank return type is allowed on benchmark function definitions", )) - }; + }; let seg = path .segments .last() @@ -551,6 +551,12 @@ pub fn benchmarks( #krate::well_known_keys::EXTRINSIC_INDEX.into() ); whitelist.push(extrinsic_index); + // Whitelist the `:intrablock_entropy`. 
+ let intrablock_entropy = #krate::TrackedStorageKey::new( + #krate::well_known_keys::INTRABLOCK_ENTROPY.into() + ); + whitelist.push(intrablock_entropy); + #krate::benchmarking::set_whitelist(whitelist.clone()); let mut results: #krate::Vec<#krate::BenchmarkResult> = #krate::Vec::new(); @@ -774,7 +780,7 @@ fn expand_benchmark( let call_name = match *expr_call.func { Expr::Path(expr_path) => { // normal function call - let Some(segment) = expr_path.path.segments.last() else { return call_err(); }; + let Some(segment) = expr_path.path.segments.last() else { return call_err() }; segment.ident.to_string() }, Expr::Infer(_) => { diff --git a/frame/support/procedural/src/construct_runtime/expand/call.rs b/frame/support/procedural/src/construct_runtime/expand/call.rs index 5ec665682ddaa..cc4841418beec 100644 --- a/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -123,9 +123,7 @@ pub fn expand_outer_dispatch( } } } - // Deprecated, but will warn when used - #[allow(deprecated)] - impl #scrate::weights::GetDispatchInfo for RuntimeCall {} + impl #scrate::dispatch::GetCallMetadata for RuntimeCall { fn get_call_metadata(&self) -> #scrate::dispatch::CallMetadata { use #scrate::dispatch::GetCallName; diff --git a/frame/support/procedural/src/construct_runtime/expand/config.rs b/frame/support/procedural/src/construct_runtime/expand/config.rs index 4d73ebb11774a..440e856221a36 100644 --- a/frame/support/procedural/src/construct_runtime/expand/config.rs +++ b/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -29,7 +29,7 @@ pub fn expand_outer_config( ) -> TokenStream { let mut types = TokenStream::new(); let mut fields = TokenStream::new(); - let mut build_storage_calls = TokenStream::new(); + let mut genesis_build_calls = TokenStream::new(); let mut query_genesis_config_part_macros = Vec::new(); for decl in pallet_decls { @@ -52,8 +52,8 @@ pub fn expand_outer_config( 
types.extend(expand_config_types(attr, runtime, decl, &config, part_is_generic)); fields.extend(quote!(#attr pub #field_name: #config,)); - build_storage_calls - .extend(expand_config_build_storage_call(scrate, attr, runtime, decl, field_name)); + genesis_build_calls + .extend(expand_config_build_storage_call(scrate, &config, attr, field_name)); query_genesis_config_part_macros.push(quote! { #path::__substrate_genesis_config_check::is_genesis_config_defined!(#pallet_name); #[cfg(feature = "std")] @@ -67,30 +67,36 @@ pub fn expand_outer_config( #types - #[cfg(any(feature = "std", test))] use #scrate::serde as __genesis_config_serde_import__; - #[cfg(any(feature = "std", test))] #[derive(#scrate::serde::Serialize, #scrate::serde::Deserialize, Default)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] #[serde(crate = "__genesis_config_serde_import__")] - pub struct GenesisConfig { + pub struct RuntimeGenesisConfig { #fields } #[cfg(any(feature = "std", test))] - impl #scrate::sp_runtime::BuildStorage for GenesisConfig { + #[deprecated(note = "GenesisConfig is planned to be removed in December 2023. Use `RuntimeGenesisConfig` instead.")] + pub type GenesisConfig = RuntimeGenesisConfig; + + #[cfg(any(feature = "std", test))] + impl #scrate::sp_runtime::BuildStorage for RuntimeGenesisConfig { fn assimilate_storage( &self, storage: &mut #scrate::sp_runtime::Storage, ) -> std::result::Result<(), String> { - #build_storage_calls - #scrate::BasicExternalities::execute_with_storage(storage, || { - ::on_genesis(); - }); + ::build(&self); + Ok(()) + }) + } + } - Ok(()) + impl #scrate::traits::BuildGenesisConfig for RuntimeGenesisConfig { + fn build(&self) { + #genesis_build_calls + ::on_genesis(); } } } @@ -108,17 +114,14 @@ fn expand_config_types( match (decl.instance.as_ref(), part_is_generic) { (Some(inst), true) => quote! { #attr - #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig<#runtime, #path::#inst>; }, (None, true) => quote! 
{ #attr - #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig<#runtime>; }, (_, false) => quote! { #attr - #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig; }, } @@ -126,21 +129,12 @@ fn expand_config_types( fn expand_config_build_storage_call( scrate: &TokenStream, + pallet_genesis_config: &Ident, attr: &TokenStream, - runtime: &Ident, - decl: &Pallet, field_name: &Ident, ) -> TokenStream { - let path = &decl.path; - let instance = if let Some(inst) = decl.instance.as_ref() { - quote!(#path::#inst) - } else { - quote!(#path::__InherentHiddenInstance) - }; - quote! { #attr - #scrate::sp_runtime::BuildModuleGenesisStorage:: - <#runtime, #instance>::build_module_genesis_storage(&self.#field_name, storage)?; + <#pallet_genesis_config as #scrate::traits::BuildGenesisConfig>::build(&self.#field_name); } } diff --git a/frame/support/procedural/src/construct_runtime/expand/event.rs b/frame/support/procedural/src/construct_runtime/expand/event.rs deleted file mode 100644 index fcd9b32141ab5..0000000000000 --- a/frame/support/procedural/src/construct_runtime/expand/event.rs +++ /dev/null @@ -1,168 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License - -use crate::construct_runtime::Pallet; -use proc_macro2::TokenStream; -use quote::quote; -use std::str::FromStr; -use syn::{Generics, Ident}; - -pub fn expand_outer_event( - runtime: &Ident, - pallet_decls: &[Pallet], - scrate: &TokenStream, -) -> syn::Result { - let mut event_variants = TokenStream::new(); - let mut event_conversions = TokenStream::new(); - let mut query_event_part_macros = Vec::new(); - - for pallet_decl in pallet_decls { - if let Some(pallet_entry) = pallet_decl.find_part("Event") { - let path = &pallet_decl.path; - let pallet_name = &pallet_decl.name; - let index = pallet_decl.index; - let instance = pallet_decl.instance.as_ref(); - let generics = &pallet_entry.generics; - - if instance.is_some() && generics.params.is_empty() { - let msg = format!( - "Instantiable pallet with no generic `Event` cannot \ - be constructed: pallet `{}` must have generic `Event`", - pallet_name, - ); - return Err(syn::Error::new(pallet_name.span(), msg)) - } - - let part_is_generic = !generics.params.is_empty(); - let pallet_event = match (instance, part_is_generic) { - (Some(inst), true) => quote!(#path::Event::<#runtime, #path::#inst>), - (Some(inst), false) => quote!(#path::Event::<#path::#inst>), - (None, true) => quote!(#path::Event::<#runtime>), - (None, false) => quote!(#path::Event), - }; - - event_variants.extend(expand_event_variant( - runtime, - pallet_decl, - index, - instance, - generics, - )); - event_conversions.extend(expand_event_conversion(scrate, pallet_decl, &pallet_event)); - query_event_part_macros.push(quote! { - #path::__substrate_event_check::is_event_part_defined!(#pallet_name); - }); - } - } - - Ok(quote! 
{ - #( #query_event_part_macros )* - - #[derive( - Clone, PartialEq, Eq, - #scrate::codec::Encode, - #scrate::codec::Decode, - #scrate::scale_info::TypeInfo, - #scrate::RuntimeDebug, - )] - #[allow(non_camel_case_types)] - pub enum RuntimeEvent { - #event_variants - } - - #event_conversions - }) -} - -fn expand_event_variant( - runtime: &Ident, - pallet: &Pallet, - index: u8, - instance: Option<&Ident>, - generics: &Generics, -) -> TokenStream { - let path = &pallet.path; - let variant_name = &pallet.name; - let part_is_generic = !generics.params.is_empty(); - let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); - - match instance { - Some(inst) if part_is_generic => quote! { - #attr - #[codec(index = #index)] - #variant_name(#path::Event<#runtime, #path::#inst>), - }, - Some(inst) => quote! { - #attr - #[codec(index = #index)] - #variant_name(#path::Event<#path::#inst>), - }, - None if part_is_generic => quote! { - #attr - #[codec(index = #index)] - #variant_name(#path::Event<#runtime>), - }, - None => quote! { - #attr - #[codec(index = #index)] - #variant_name(#path::Event), - }, - } -} - -fn expand_event_conversion( - scrate: &TokenStream, - pallet: &Pallet, - pallet_event: &TokenStream, -) -> TokenStream { - let variant_name = &pallet.name; - let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); - - quote! 
{ - #attr - impl From<#pallet_event> for RuntimeEvent { - fn from(x: #pallet_event) -> Self { - RuntimeEvent::#variant_name(x) - } - } - #attr - impl TryInto<#pallet_event> for RuntimeEvent { - type Error = (); - - fn try_into(self) -> #scrate::sp_std::result::Result<#pallet_event, Self::Error> { - match self { - Self::#variant_name(evt) => Ok(evt), - _ => Err(()), - } - } - } - } -} diff --git a/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/frame/support/procedural/src/construct_runtime/expand/inherent.rs index 52586bd691d4e..1dc51d9cd2bc4 100644 --- a/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -19,12 +19,12 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; use std::str::FromStr; -use syn::{Ident, TypePath}; +use syn::Ident; pub fn expand_outer_inherent( runtime: &Ident, - block: &TypePath, - unchecked_extrinsic: &TypePath, + block: &TokenStream, + unchecked_extrinsic: &TokenStream, pallet_decls: &[Pallet], scrate: &TokenStream, ) -> TokenStream { @@ -58,22 +58,22 @@ pub fn expand_outer_inherent( trait InherentDataExt { fn create_extrinsics(&self) -> - #scrate::inherent::Vec<<#block as #scrate::inherent::BlockT>::Extrinsic>; + #scrate::sp_std::vec::Vec<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic>; fn check_extrinsics(&self, block: &#block) -> #scrate::inherent::CheckInherentsResult; } impl InherentDataExt for #scrate::inherent::InherentData { fn create_extrinsics(&self) -> - #scrate::inherent::Vec<<#block as #scrate::inherent::BlockT>::Extrinsic> + #scrate::sp_std::vec::Vec<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic> { use #scrate::inherent::ProvideInherent; - let mut inherents = Vec::new(); + let mut inherents = #scrate::sp_std::vec::Vec::new(); #( #pallet_attrs if let Some(inherent) = #pallet_names::create_inherent(self) { - let inherent = <#unchecked_extrinsic as 
#scrate::inherent::Extrinsic>::new( + let inherent = <#unchecked_extrinsic as #scrate::sp_runtime::traits::Extrinsic>::new( inherent.into(), None, ).expect("Runtime UncheckedExtrinsic is not Opaque, so it has to return \ @@ -90,13 +90,41 @@ pub fn expand_outer_inherent( use #scrate::inherent::{ProvideInherent, IsFatalError}; use #scrate::traits::{IsSubType, ExtrinsicCall}; use #scrate::sp_runtime::traits::Block as _; + use #scrate::_private::sp_inherents::Error; + use #scrate::log; let mut result = #scrate::inherent::CheckInherentsResult::new(); + // This handle assume we abort on the first fatal error. + fn handle_put_error_result(res: Result<(), Error>) { + const LOG_TARGET: &str = "runtime::inherent"; + match res { + Ok(()) => (), + Err(Error::InherentDataExists(id)) => + log::debug!( + target: LOG_TARGET, + "Some error already reported for inherent {:?}, new non fatal \ + error is ignored", + id + ), + Err(Error::FatalErrorReported) => + log::error!( + target: LOG_TARGET, + "Fatal error already reported, unexpected considering there is \ + only one fatal error", + ), + Err(_) => + log::error!( + target: LOG_TARGET, + "Unexpected error from `put_error` operation", + ), + } + } + for xt in block.extrinsics() { // Inherents are before any other extrinsics. // And signed extrinsics are not inherents. 
- if #scrate::inherent::Extrinsic::is_signed(xt).unwrap_or(false) { + if #scrate::sp_runtime::traits::Extrinsic::is_signed(xt).unwrap_or(false) { break } @@ -110,9 +138,9 @@ pub fn expand_outer_inherent( if #pallet_names::is_inherent(call) { is_inherent = true; if let Err(e) = #pallet_names::check_inherent(call, self) { - result.put_error( + handle_put_error_result(result.put_error( #pallet_names::INHERENT_IDENTIFIER, &e - ).expect("There is only one fatal error; qed"); + )); if e.is_fatal_error() { return result; } @@ -134,7 +162,7 @@ pub fn expand_outer_inherent( match #pallet_names::is_inherent_required(self) { Ok(Some(e)) => { let found = block.extrinsics().iter().any(|xt| { - let is_signed = #scrate::inherent::Extrinsic::is_signed(xt) + let is_signed = #scrate::sp_runtime::traits::Extrinsic::is_signed(xt) .unwrap_or(false); if !is_signed { @@ -153,9 +181,9 @@ pub fn expand_outer_inherent( }); if !found { - result.put_error( + handle_put_error_result(result.put_error( #pallet_names::INHERENT_IDENTIFIER, &e - ).expect("There is only one fatal error; qed"); + )); if e.is_fatal_error() { return result; } @@ -163,9 +191,9 @@ pub fn expand_outer_inherent( }, Ok(None) => (), Err(e) => { - result.put_error( + handle_put_error_result(result.put_error( #pallet_names::INHERENT_IDENTIFIER, &e - ).expect("There is only one fatal error; qed"); + )); if e.is_fatal_error() { return result; } @@ -186,7 +214,8 @@ pub fn expand_outer_inherent( let mut first_signed_observed = false; for (i, xt) in block.extrinsics().iter().enumerate() { - let is_signed = #scrate::inherent::Extrinsic::is_signed(xt).unwrap_or(false); + let is_signed = #scrate::sp_runtime::traits::Extrinsic::is_signed(xt) + .unwrap_or(false); let is_inherent = if is_signed { // Signed extrinsics are not inherents. 
diff --git a/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 81fc93ba3c9ef..0975fedb35d5b 100644 --- a/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -15,17 +15,18 @@ // See the License for the specific language governing permissions and // limitations under the License -use crate::construct_runtime::Pallet; +use crate::construct_runtime::{parse::PalletPath, Pallet}; use proc_macro2::TokenStream; use quote::quote; use std::str::FromStr; -use syn::{Ident, TypePath}; +use syn::Ident; pub fn expand_runtime_metadata( runtime: &Ident, pallet_declarations: &[Pallet], scrate: &TokenStream, - extrinsic: &TypePath, + extrinsic: &TokenStream, + system_path: &PalletPath, ) -> TokenStream { let pallets = pallet_declarations .iter() @@ -95,11 +96,29 @@ pub fn expand_runtime_metadata( // `Deref` needs a reference for resolving the function call. 
let rt = #runtime; + let ty = #scrate::scale_info::meta_type::<#extrinsic>(); + let address_ty = #scrate::scale_info::meta_type::< + <<#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::SignaturePayload as #scrate::sp_runtime::traits::SignaturePayload>::SignatureAddress + >(); + let call_ty = #scrate::scale_info::meta_type::< + <#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::Call + >(); + let signature_ty = #scrate::scale_info::meta_type::< + <<#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::SignaturePayload as #scrate::sp_runtime::traits::SignaturePayload>::Signature + >(); + let extra_ty = #scrate::scale_info::meta_type::< + <<#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::SignaturePayload as #scrate::sp_runtime::traits::SignaturePayload>::SignatureExtra + >(); + #scrate::metadata_ir::MetadataIR { pallets: #scrate::sp_std::vec![ #(#pallets),* ], extrinsic: #scrate::metadata_ir::ExtrinsicMetadataIR { - ty: #scrate::scale_info::meta_type::<#extrinsic>(), + ty, version: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSION, + address_ty, + call_ty, + signature_ty, + extra_ty, signed_extensions: < < #extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata @@ -115,11 +134,20 @@ pub fn expand_runtime_metadata( }, ty: #scrate::scale_info::meta_type::<#runtime>(), apis: (&rt).runtime_metadata(), + outer_enums: #scrate::metadata_ir::OuterEnumsIR { + call_enum_ty: #scrate::scale_info::meta_type::< + <#runtime as #system_path::Config>::RuntimeCall + >(), + event_enum_ty: #scrate::scale_info::meta_type::(), + error_enum_ty: #scrate::scale_info::meta_type::(), + } } } pub fn metadata() -> #scrate::metadata::RuntimeMetadataPrefixed { - #scrate::metadata_ir::into_latest(#runtime::metadata_ir()) + // Note: this always returns the V14 version. The runtime API function + // must be deprecated. 
+ #scrate::metadata_ir::into_v14(#runtime::metadata_ir()) } pub fn metadata_at_version(version: u32) -> Option<#scrate::OpaqueMetadata> { diff --git a/frame/support/procedural/src/construct_runtime/expand/mod.rs b/frame/support/procedural/src/construct_runtime/expand/mod.rs index 0fd98bb4dda13..830338f9265ff 100644 --- a/frame/support/procedural/src/construct_runtime/expand/mod.rs +++ b/frame/support/procedural/src/construct_runtime/expand/mod.rs @@ -17,24 +17,24 @@ mod call; mod config; -mod event; mod freeze_reason; mod hold_reason; mod inherent; mod lock_id; mod metadata; mod origin; +mod outer_enums; mod slash_reason; mod unsigned; pub use call::expand_outer_dispatch; pub use config::expand_outer_config; -pub use event::expand_outer_event; pub use freeze_reason::expand_outer_freeze_reason; pub use hold_reason::expand_outer_hold_reason; pub use inherent::expand_outer_inherent; pub use lock_id::expand_outer_lock_id; pub use metadata::expand_runtime_metadata; pub use origin::expand_outer_origin; +pub use outer_enums::{expand_outer_enum, OuterEnumType}; pub use slash_reason::expand_outer_slash_reason; pub use unsigned::expand_outer_validate_unsigned; diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index e48a70a9df973..0eaa73f603068 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -418,6 +418,34 @@ fn expand_origin_pallet_conversions( } } } + + #attr + impl<'a> TryFrom<&'a OriginCaller> for &'a #pallet_origin { + type Error = (); + fn try_from( + x: &'a OriginCaller, + ) -> #scrate::sp_std::result::Result<&'a #pallet_origin, ()> { + if let OriginCaller::#variant_name(l) = x { + Ok(&l) + } else { + Err(()) + } + } + } + + #attr + impl<'a> TryFrom<&'a RuntimeOrigin> for &'a #pallet_origin { + type Error = (); + fn try_from( + x: &'a RuntimeOrigin, + ) -> 
#scrate::sp_std::result::Result<&'a #pallet_origin, ()> { + if let OriginCaller::#variant_name(l) = &x.caller { + Ok(&l) + } else { + Err(()) + } + } + } } } diff --git a/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs b/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs new file mode 100644 index 0000000000000..b78360d5fc052 --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs @@ -0,0 +1,281 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::Pallet; +use proc_macro2::{Span, TokenStream}; +use quote::{quote, ToTokens}; +use std::str::FromStr; +use syn::{Generics, Ident}; + +/// Represents the types supported for creating an outer enum. +#[derive(Clone, Copy, PartialEq)] +pub enum OuterEnumType { + /// Collects the Event enums from all pallets. + Event, + /// Collects the Error enums from all pallets. + Error, +} + +impl OuterEnumType { + /// The name of the structure this enum represents. + fn struct_name(&self) -> &str { + match self { + OuterEnumType::Event => "RuntimeEvent", + OuterEnumType::Error => "RuntimeError", + } + } + + /// The name of the variant (ie `Event` or `Error`). 
+ fn variant_name(&self) -> &str { + match self { + OuterEnumType::Event => "Event", + OuterEnumType::Error => "Error", + } + } +} + +impl ToTokens for OuterEnumType { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + OuterEnumType::Event => quote!(Event).to_tokens(tokens), + OuterEnumType::Error => quote!(Error).to_tokens(tokens), + } + } +} + +/// Create an outer enum that encapsulates all pallets as variants. +/// +/// Each variant represents a pallet and contains the corresponding type declared with either: +/// - #[pallet::event] for the [`OuterEnumType::Event`] variant +/// - #[pallet::error] for the [`OuterEnumType::Error`] variant +/// +/// The name of the outer enum is prefixed with Runtime, resulting in names like RuntimeEvent +/// or RuntimeError. +/// +/// This structure facilitates the decoding process by leveraging the metadata. +/// +/// # Example +/// +/// The code generate looks like the following for [`OuterEnumType::Event`]. +/// +/// ```ignore +/// enum RuntimeEvent { +/// #[codec(index = 0)] +/// System(pallet_system::Event), +/// +/// #[codec(index = 5)] +/// Balances(pallet_system::Event), +/// } +/// ``` +/// +/// Notice that the pallet index is preserved using the `#[codec(index = ..)]` attribute. +pub fn expand_outer_enum( + runtime: &Ident, + pallet_decls: &[Pallet], + scrate: &TokenStream, + enum_ty: OuterEnumType, +) -> syn::Result { + // Stores all pallet variants. + let mut enum_variants = TokenStream::new(); + // Generates the enum conversion between the `Runtime` outer enum and the pallet's enum. + let mut enum_conversions = TokenStream::new(); + // Specific for events to query via `is_event_part_defined!`. 
+ let mut query_enum_part_macros = Vec::new(); + + let enum_name_str = enum_ty.variant_name(); + let enum_name_ident = Ident::new(enum_ty.struct_name(), Span::call_site()); + + for pallet_decl in pallet_decls { + let Some(pallet_entry) = pallet_decl.find_part(enum_name_str) else { continue }; + + let path = &pallet_decl.path; + let pallet_name = &pallet_decl.name; + let index = pallet_decl.index; + let instance = pallet_decl.instance.as_ref(); + let generics = &pallet_entry.generics; + + if instance.is_some() && generics.params.is_empty() { + let msg = format!( + "Instantiable pallet with no generic `{}` cannot \ + be constructed: pallet `{}` must have generic `{}`", + enum_name_str, pallet_name, enum_name_str, + ); + return Err(syn::Error::new(pallet_name.span(), msg)) + } + + let part_is_generic = !generics.params.is_empty(); + let pallet_enum = match (instance, part_is_generic) { + (Some(inst), true) => quote!(#path::#enum_ty::<#runtime, #path::#inst>), + (Some(inst), false) => quote!(#path::#enum_ty::<#path::#inst>), + (None, true) => quote!(#path::#enum_ty::<#runtime>), + (None, false) => quote!(#path::#enum_ty), + }; + + enum_variants.extend(expand_enum_variant( + runtime, + pallet_decl, + index, + instance, + generics, + enum_ty, + )); + enum_conversions.extend(expand_enum_conversion( + scrate, + pallet_decl, + &pallet_enum, + &enum_name_ident, + )); + + if enum_ty == OuterEnumType::Event { + query_enum_part_macros.push(quote! { + #path::__substrate_event_check::is_event_part_defined!(#pallet_name); + }); + } + } + + // Derives specific for the event. + let event_custom_derives = + if enum_ty == OuterEnumType::Event { quote!(Clone, PartialEq, Eq,) } else { quote!() }; + + // Implementation specific for errors. + let error_custom_impl = generate_error_impl(scrate, enum_ty); + + Ok(quote! 
{ + #( #query_enum_part_macros )* + + #[derive( + #event_custom_derives + #scrate::codec::Encode, + #scrate::codec::Decode, + #scrate::scale_info::TypeInfo, + #scrate::RuntimeDebug, + )] + #[allow(non_camel_case_types)] + pub enum #enum_name_ident { + #enum_variants + } + + #enum_conversions + + #error_custom_impl + }) +} + +fn expand_enum_variant( + runtime: &Ident, + pallet: &Pallet, + index: u8, + instance: Option<&Ident>, + generics: &Generics, + enum_ty: OuterEnumType, +) -> TokenStream { + let path = &pallet.path; + let variant_name = &pallet.name; + let part_is_generic = !generics.params.is_empty(); + let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); + + match instance { + Some(inst) if part_is_generic => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::#enum_ty<#runtime, #path::#inst>), + }, + Some(inst) => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::#enum_ty<#path::#inst>), + }, + None if part_is_generic => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::#enum_ty<#runtime>), + }, + None => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::#enum_ty), + }, + } +} + +fn expand_enum_conversion( + scrate: &TokenStream, + pallet: &Pallet, + pallet_enum: &TokenStream, + enum_name_ident: &Ident, +) -> TokenStream { + let variant_name = &pallet.name; + let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); + + quote! 
{ + #attr + impl From<#pallet_enum> for #enum_name_ident { + fn from(x: #pallet_enum) -> Self { + #enum_name_ident + ::#variant_name(x) + } + } + #attr + impl TryInto<#pallet_enum> for #enum_name_ident { + type Error = (); + + fn try_into(self) -> #scrate::sp_std::result::Result<#pallet_enum, Self::Error> { + match self { + Self::#variant_name(evt) => Ok(evt), + _ => Err(()), + } + } + } + } +} + +fn generate_error_impl(scrate: &TokenStream, enum_ty: OuterEnumType) -> TokenStream { + // Implementation is specific to `Error`s. + if enum_ty == OuterEnumType::Event { + return quote! {} + } + + let enum_name_ident = Ident::new(enum_ty.struct_name(), Span::call_site()); + + quote! { + impl #enum_name_ident { + /// Optionally convert the `DispatchError` into the `RuntimeError`. + /// + /// Returns `Some` if the error matches the `DispatchError::Module` variant, otherwise `None`. + pub fn from_dispatch_error(err: #scrate::sp_runtime::DispatchError) -> Option { + let #scrate::sp_runtime::DispatchError::Module(module_error) = err else { return None }; + + let bytes = #scrate::codec::Encode::encode(&module_error); + #scrate::codec::Decode::decode(&mut &bytes[..]).ok() + } + } + } +} diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 1af44fc00a0ec..efc2244154479 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -20,17 +20,69 @@ //! `construct_runtime` implementation is recursive and can generate code which will call itself in //! order to get all the pallet parts for each pallet. //! -//! Pallets define their parts (`Call`, `Storage`, ..) either explicitly with the syntax -//! `::{Call, ...}` or implicitly. +//! Pallets can define their parts: +//! - Implicitely: `System: frame_system` +//! - Explicitly: `System: frame_system::{Pallet, Call}` //! -//! 
In case a pallet defines its parts implicitly, then the pallet must provide the -//! `tt_default_parts` macro. `construct_rutime` will generate some code which utilizes `tt_call` -//! to call the `tt_default_parts` macro of the pallet. `tt_default_parts` will then return the -//! default pallet parts as input tokens to the `match_and_replace` macro, which ultimately -//! generates a call to `construct_runtime` again, this time with all the pallet parts explicitly -//! defined. +//! The `construct_runtime` transitions from the implicit definition to the explict one. +//! From the explicit state, Substrate expands the pallets with additional information +//! that is to be included in the runtime metadata. This expansion makes visible some extra +//! parts of the pallets, mainly the `Error` if defined. The expanded state looks like +//! `System: frame_system expanded::{Error} ::{Pallet, Call}` and concatenates the extra expanded +//! parts with the user-provided parts. For example, the `Pallet`, `Call` and `Error` parts are +//! collected. +//! +//! Pallets must provide the `tt_extra_parts` and `tt_default_parts` macros for these transitions. +//! These are automatically implemented by the `#[pallet::pallet]` macro. +//! +//! This macro also generates the following enums for ease of decoding: +//! - `enum RuntimeCall`: This type contains the information needed to decode extrinsics. +//! - `enum RuntimeEvent`: This type contains the information needed to decode events. +//! - `enum RuntimeError`: While this cannot be used directly to decode `sp_runtime::DispatchError` +//! from the chain, it contains the information needed to decode the +//! `sp_runtime::DispatchError::Module`. +//! +//! # State Transitions +//! +//! ```ignore +//! +----------+ +//! | Implicit | -----------+ +//! +----------+ | +//! | | +//! v v +//! +----------+ +------------------+ +//! | Explicit | --> | ExplicitExpanded | +//! +----------+ +------------------+ +//! ``` +//! +//! 
When all pallet parts are implcit, then the `construct_runtime!` macro expands to its final +//! state, the `ExplicitExpanded`. Otherwise, all implicit parts are converted to an explicit +//! expanded part allow the `construct_runtime!` to expand any remaining explicit parts to an +//! explicit expanded part. +//! +//! # Implicit to Explicit +//! +//! The `construct_runtime` macro transforms the implicit declaration of each pallet +//! `System: frame_system` to an explicit one `System: frame_system::{Pallet, Call}` using the +//! `tt_default_parts` macro. +//! +//! The `tt_default_parts` macro exposes a comma separated list of pallet parts. For example, the +//! `Event` part is exposed only if the pallet implements an event via `#[pallet::event]` macro. +//! The tokens generated by this macro are ` expanded :: { Pallet, Call }` for our example. +//! +//! The `match_and_insert` macro takes in 3 arguments: +//! - target: This is the `TokenStream` that contains the `construct_runtime!` macro. +//! - pattern: The pattern to match against in the target stream. +//! - tokens: The tokens to added after the pattern match. +//! +//! The `construct_runtime` macro uses the `tt_call` to get the default pallet parts via +//! the `tt_default_parts` macro defined by each pallet. The pallet parts are then returned as +//! input to the `match_and_replace` macro. +//! The `match_and_replace` then will modify the the `construct_runtime!` to expand the implicit +//! definition to the explicit one. +//! +//! For example, //! -//! E.g. //! ```ignore //! construct_runtime!( //! //... @@ -106,6 +158,7 @@ //! tokens = [{ ::{Pallet, Call} }] //! } //! ``` +//! //! Which will then finally expand to the following: //! ```ignore //! construct_runtime!( @@ -116,6 +169,7 @@ //! } //! ) //! ``` +//! //! This call has no implicit pallet parts, thus it will expand to the runtime construction: //! ```ignore //! pub struct Runtime { ... } @@ -140,6 +194,19 @@ //! | w/ pallet parts | //! 
+--------------------+ //! ``` +//! +//! # Explicit to Explicit Expanded +//! +//! Users normally do not care about this transition. +//! +//! Similarly to the previous transition, the macro expansion transforms `System: +//! frame_system::{Pallet, Call}` into `System: frame_system expanded::{Error} ::{Pallet, Call}`. +//! The `expanded` section adds extra parts that the Substrate would like to expose for each pallet +//! by default. This is done to expose the approprite types for metadata construction. +//! +//! This time, instead of calling `tt_default_parts` we are using the `tt_extra_parts` macro. +//! This macro returns the ` :: expanded { Error }` list of additional parts we would like to +//! expose. mod expand; mod parse; @@ -149,10 +216,7 @@ use frame_support_procedural_tools::{ generate_crate_access, generate_crate_access_2018, generate_hidden_includes, }; use itertools::Itertools; -use parse::{ - ExplicitRuntimeDeclaration, ImplicitRuntimeDeclaration, Pallet, RuntimeDeclaration, - WhereSection, -}; +use parse::{ExplicitRuntimeDeclaration, ImplicitRuntimeDeclaration, Pallet, RuntimeDeclaration}; use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; use quote::quote; @@ -171,19 +235,39 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { let res = match definition { RuntimeDeclaration::Implicit(implicit_def) => check_pallet_number(input_copy.clone().into(), implicit_def.pallets.len()).and_then( - |_| construct_runtime_intermediary_expansion(input_copy.into(), implicit_def), + |_| construct_runtime_implicit_to_explicit(input_copy.into(), implicit_def), ), - RuntimeDeclaration::Explicit(explicit_decl) => + RuntimeDeclaration::Explicit(explicit_decl) => check_pallet_number( + input_copy.clone().into(), + explicit_decl.pallets.len(), + ) + .and_then(|_| { + construct_runtime_explicit_to_explicit_expanded(input_copy.into(), explicit_decl) + }), + RuntimeDeclaration::ExplicitExpanded(explicit_decl) => 
check_pallet_number(input_copy.into(), explicit_decl.pallets.len()) .and_then(|_| construct_runtime_final_expansion(explicit_decl)), }; - res.unwrap_or_else(|e| e.to_compile_error()).into() + let res = res.unwrap_or_else(|e| e.to_compile_error()); + + let res = expander::Expander::new("construct_runtime") + .dry(std::env::var("FRAME_EXPAND").is_err()) + .verbose(true) + .write_to_out_dir(res) + .expect("Does not fail because of IO in OUT_DIR; qed"); + + res.into() } -/// When some pallet have implicit parts definition then the macro will expand into a macro call to -/// `construct_runtime_args` of each pallets, see root documentation. -fn construct_runtime_intermediary_expansion( +/// All pallets that have implicit pallet parts (ie `System: frame_system`) are +/// expanded with the default parts defined by the pallet's `tt_default_parts` macro. +/// +/// This function transforms the [`RuntimeDeclaration::Implicit`] into +/// [`RuntimeDeclaration::Explicit`] that is not yet fully expanded. +/// +/// For more details, please refer to the root documentation. +fn construct_runtime_implicit_to_explicit( input: TokenStream2, definition: ImplicitRuntimeDeclaration, ) -> Result { @@ -210,23 +294,54 @@ fn construct_runtime_intermediary_expansion( Ok(expansion) } +/// All pallets that have +/// (I): explicit pallet parts (ie `System: frame_system::{Pallet, Call}`) and +/// (II): are not fully expanded (ie do not include the `Error` expansion part) +/// are fully expanded by including the parts from the pallet's `tt_extra_parts` macro. +/// +/// This function transforms the [`RuntimeDeclaration::Explicit`] that is not yet fully expanded +/// into [`RuntimeDeclaration::ExplicitExpanded`] fully expanded. +/// +/// For more details, please refer to the root documentation. 
+fn construct_runtime_explicit_to_explicit_expanded( + input: TokenStream2, + definition: ExplicitRuntimeDeclaration, +) -> Result { + let frame_support = generate_crate_access_2018("frame-support")?; + let mut expansion = quote::quote!( + #frame_support::construct_runtime! { #input } + ); + for pallet in definition.pallets.iter().filter(|pallet| !pallet.is_expanded) { + let pallet_path = &pallet.path; + let pallet_name = &pallet.name; + let pallet_instance = pallet.instance.as_ref().map(|instance| quote::quote!(::<#instance>)); + expansion = quote::quote!( + #frame_support::tt_call! { + macro = [{ #pallet_path::tt_extra_parts }] + frame_support = [{ #frame_support }] + ~~> #frame_support::match_and_insert! { + target = [{ #expansion }] + pattern = [{ #pallet_name: #pallet_path #pallet_instance }] + } + } + ); + } + + Ok(expansion) +} + /// All pallets have explicit definition of parts, this will expand to the runtime declaration. fn construct_runtime_final_expansion( definition: ExplicitRuntimeDeclaration, ) -> Result { - let ExplicitRuntimeDeclaration { - name, - where_section: WhereSection { block, node_block, unchecked_extrinsic }, - pallets, - pallets_token, - } = definition; + let ExplicitRuntimeDeclaration { name, pallets, pallets_token, where_section } = definition; let system_pallet = pallets.iter().find(|decl| decl.name == SYSTEM_PALLET_NAME).ok_or_else(|| { syn::Error::new( pallets_token.span.join(), "`System` pallet declaration is missing. 
\ - Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", + Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", ) })?; if !system_pallet.cfg_pattern.is_empty() { @@ -256,14 +371,27 @@ fn construct_runtime_final_expansion( let scrate = generate_crate_access(hidden_crate_name, "frame-support"); let scrate_decl = generate_hidden_includes(hidden_crate_name, "frame-support"); - let outer_event = expand::expand_outer_event(&name, &pallets, &scrate)?; + let frame_system = generate_crate_access_2018("frame-system")?; + let block = quote!(<#name as #frame_system::Config>::Block); + let unchecked_extrinsic = quote!(<#block as #scrate::sp_runtime::traits::Block>::Extrinsic); + + let outer_event = + expand::expand_outer_enum(&name, &pallets, &scrate, expand::OuterEnumType::Event)?; + let outer_error = + expand::expand_outer_enum(&name, &pallets, &scrate, expand::OuterEnumType::Error)?; let outer_origin = expand::expand_outer_origin(&name, system_pallet, &pallets, &scrate)?; let all_pallets = decl_all_pallets(&name, pallets.iter(), &features); let pallet_to_index = decl_pallet_runtime_setup(&name, &pallets, &scrate); let dispatch = expand::expand_outer_dispatch(&name, system_pallet, &pallets, &scrate); - let metadata = expand::expand_runtime_metadata(&name, &pallets, &scrate, &unchecked_extrinsic); + let metadata = expand::expand_runtime_metadata( + &name, + &pallets, + &scrate, + &unchecked_extrinsic, + &system_pallet.path, + ); let outer_config = expand::expand_outer_config(&name, &pallets, &scrate); let inherent = expand::expand_outer_inherent(&name, &block, &unchecked_extrinsic, &pallets, &scrate); @@ -275,7 +403,21 @@ fn construct_runtime_final_expansion( let integrity_test = decl_integrity_test(&scrate); let static_assertions = decl_static_assertions(&name, &pallets, &scrate); + let warning = + where_section.map_or(None, |where_section| { + 
Some(proc_macro_warning::Warning::new_deprecated("WhereSection") + .old("use a `where` clause in `construct_runtime`") + .new("use `frame_system::Config` to set the `Block` type and delete this clause. + It is planned to be removed in December 2023") + .help_links(&["https://github.com/paritytech/substrate/pull/14437"]) + .span(where_section.span) + .build(), + ) + }); + let res = quote!( + #warning + #scrate_decl // Prevent UncheckedExtrinsic to print unused warning. @@ -289,9 +431,6 @@ fn construct_runtime_final_expansion( #scrate::scale_info::TypeInfo )] pub struct #name; - impl #scrate::sp_runtime::traits::GetNodeBlockType for #name { - type NodeBlock = #node_block; - } impl #scrate::sp_runtime::traits::GetRuntimeBlockType for #name { type RuntimeBlock = #block; } @@ -323,6 +462,8 @@ fn construct_runtime_final_expansion( #outer_event + #outer_error + #outer_origin #all_pallets diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index f819a90d1b5cd..9b08e16469754 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -35,6 +35,7 @@ mod keyword { syn::custom_keyword!(Call); syn::custom_keyword!(Storage); syn::custom_keyword!(Event); + syn::custom_keyword!(Error); syn::custom_keyword!(Config); syn::custom_keyword!(Origin); syn::custom_keyword!(Inherent); @@ -45,6 +46,7 @@ mod keyword { syn::custom_keyword!(SlashReason); syn::custom_keyword!(exclude_parts); syn::custom_keyword!(use_parts); + syn::custom_keyword!(expanded); } /// Declaration of a runtime. @@ -56,13 +58,14 @@ mod keyword { pub enum RuntimeDeclaration { Implicit(ImplicitRuntimeDeclaration), Explicit(ExplicitRuntimeDeclaration), + ExplicitExpanded(ExplicitRuntimeDeclaration), } /// Declaration of a runtime with some pallet with implicit declaration of parts. 
#[derive(Debug)] pub struct ImplicitRuntimeDeclaration { pub name: Ident, - pub where_section: WhereSection, + pub where_section: Option, pub pallets: Vec, } @@ -70,7 +73,7 @@ pub struct ImplicitRuntimeDeclaration { #[derive(Debug)] pub struct ExplicitRuntimeDeclaration { pub name: Ident, - pub where_section: WhereSection, + pub where_section: Option, pub pallets: Vec, pub pallets_token: token::Brace, } @@ -87,7 +90,7 @@ impl Parse for RuntimeDeclaration { } let name = input.parse::()?; - let where_section = input.parse()?; + let where_section = if input.peek(token::Where) { Some(input.parse()?) } else { None }; let pallets = input.parse::>>()?; let pallets_token = pallets.token; @@ -106,12 +109,20 @@ impl Parse for RuntimeDeclaration { pallets, pallets_token, })), + PalletsConversion::ExplicitExpanded(pallets) => + Ok(RuntimeDeclaration::ExplicitExpanded(ExplicitRuntimeDeclaration { + name, + where_section, + pallets, + pallets_token, + })), } } } #[derive(Debug)] pub struct WhereSection { + pub span: Span, pub block: syn::TypePath, pub node_block: syn::TypePath, pub unchecked_extrinsic: syn::TypePath, @@ -120,6 +131,7 @@ pub struct WhereSection { impl Parse for WhereSection { fn parse(input: ParseStream) -> Result { input.parse::()?; + let mut definitions = Vec::new(); while !input.peek(token::Brace) { let definition: WhereDefinition = input.parse()?; @@ -143,7 +155,7 @@ impl Parse for WhereSection { ); return Err(Error::new(*kind_span, msg)) } - Ok(Self { block, node_block, unchecked_extrinsic }) + Ok(Self { span: input.span(), block, node_block, unchecked_extrinsic }) } } @@ -188,6 +200,8 @@ impl Parse for WhereDefinition { /// The declaration of a pallet. #[derive(Debug, Clone)] pub struct PalletDeclaration { + /// Is this pallet fully expanded? + pub is_expanded: bool, /// The name of the pallet, e.g.`System` in `System: frame_system`. pub name: Ident, /// Optional attributes tagged right above a pallet declaration. 
@@ -233,6 +247,7 @@ impl Parse for PalletDeclaration { let _: Token![>] = input.parse()?; res } else if !(input.peek(Token![::]) && input.peek3(token::Brace)) && + !input.peek(keyword::expanded) && !input.peek(keyword::exclude_parts) && !input.peek(keyword::use_parts) && !input.peek(Token![=]) && @@ -246,10 +261,21 @@ impl Parse for PalletDeclaration { None }; + // Check if the pallet is fully expanded. + let (is_expanded, extra_parts) = if input.peek(keyword::expanded) { + let _: keyword::expanded = input.parse()?; + let _: Token![::] = input.parse()?; + (true, parse_pallet_parts(input)?) + } else { + (false, vec![]) + }; + // Parse for explicit parts let pallet_parts = if input.peek(Token![::]) && input.peek3(token::Brace) { let _: Token![::] = input.parse()?; - Some(parse_pallet_parts(input)?) + let mut parts = parse_pallet_parts(input)?; + parts.extend(extra_parts.into_iter()); + Some(parts) } else if !input.peek(keyword::exclude_parts) && !input.peek(keyword::use_parts) && !input.peek(Token![=]) && @@ -260,7 +286,7 @@ impl Parse for PalletDeclaration { "Unexpected tokens, expected one of `::{`, `exclude_parts`, `use_parts`, `=`, `,`", )) } else { - None + is_expanded.then_some(extra_parts) }; // Parse for specified parts @@ -288,7 +314,7 @@ impl Parse for PalletDeclaration { None }; - Ok(Self { attrs, name, path, instance, pallet_parts, specified_parts, index }) + Ok(Self { is_expanded, attrs, name, path, instance, pallet_parts, specified_parts, index }) } } @@ -371,6 +397,7 @@ pub enum PalletPartKeyword { Call(keyword::Call), Storage(keyword::Storage), Event(keyword::Event), + Error(keyword::Error), Config(keyword::Config), Origin(keyword::Origin), Inherent(keyword::Inherent), @@ -393,6 +420,8 @@ impl Parse for PalletPartKeyword { Ok(Self::Storage(input.parse()?)) } else if lookahead.peek(keyword::Event) { Ok(Self::Event(input.parse()?)) + } else if lookahead.peek(keyword::Error) { + Ok(Self::Error(input.parse()?)) } else if lookahead.peek(keyword::Config) { 
Ok(Self::Config(input.parse()?)) } else if lookahead.peek(keyword::Origin) { @@ -423,6 +452,7 @@ impl PalletPartKeyword { Self::Call(_) => "Call", Self::Storage(_) => "Storage", Self::Event(_) => "Event", + Self::Error(_) => "Error", Self::Config(_) => "Config", Self::Origin(_) => "Origin", Self::Inherent(_) => "Inherent", @@ -441,7 +471,7 @@ impl PalletPartKeyword { /// Returns the names of all pallet parts that allow to have a generic argument. fn all_generic_arg() -> &'static [&'static str] { - &["Event", "Origin", "Config"] + &["Event", "Error", "Origin", "Config"] } } @@ -452,6 +482,7 @@ impl ToTokens for PalletPartKeyword { Self::Call(inner) => inner.to_tokens(tokens), Self::Storage(inner) => inner.to_tokens(tokens), Self::Event(inner) => inner.to_tokens(tokens), + Self::Error(inner) => inner.to_tokens(tokens), Self::Config(inner) => inner.to_tokens(tokens), Self::Origin(inner) => inner.to_tokens(tokens), Self::Inherent(inner) => inner.to_tokens(tokens), @@ -554,6 +585,8 @@ fn parse_pallet_parts_no_generic(input: ParseStream) -> Result | Explicit | -> | ExplicitExpanded | +/// +----------+ +----------+ +------------------+ +/// ``` enum PalletsConversion { + /// Pallets implicitely declare parts. + /// + /// `System: frame_system`. Implicit(Vec), + /// Pallets explicitly declare parts. + /// + /// `System: frame_system::{Pallet, Call}` + /// + /// However, for backwards compatibility with Polkadot/Kusama + /// we must propagate some other parts to the pallet by default. Explicit(Vec), + /// Pallets explicitly declare parts that are fully expanded. + /// + /// This is the end state that contains extra parts included by + /// default by Subtrate. + /// + /// `System: frame_system expanded::{Error} ::{Pallet, Call}` + /// + /// For this example, the `Pallet`, `Call` and `Error` parts are collected. + ExplicitExpanded(Vec), } /// Convert from the parsed pallet declaration to their final information. 
@@ -604,6 +663,7 @@ fn convert_pallets(pallets: Vec) -> syn::Result = None; let mut names = HashMap::new(); + let mut is_expanded = true; let pallets = pallets .into_iter() @@ -698,7 +758,10 @@ fn convert_pallets(pallets: Vec) -> syn::Result>>()?; + is_expanded &= pallet.is_expanded; + Ok(Pallet { + is_expanded: pallet.is_expanded, name: pallet.name, index: final_index, path: pallet.path, @@ -709,5 +772,9 @@ fn convert_pallets(pallets: Vec) -> syn::Result>>()?; - Ok(PalletsConversion::Explicit(pallets)) + if is_expanded { + Ok(PalletsConversion::ExplicitExpanded(pallets)) + } else { + Ok(PalletsConversion::Explicit(pallets)) + } } diff --git a/frame/support/procedural/src/derive_impl.rs b/frame/support/procedural/src/derive_impl.rs new file mode 100644 index 0000000000000..5ea44c35bb646 --- /dev/null +++ b/frame/support/procedural/src/derive_impl.rs @@ -0,0 +1,175 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of the `derive_impl` attribute macro. 
+ +use derive_syn_parse::Parse; +use macro_magic::mm_core::ForeignPath; +use proc_macro2::TokenStream as TokenStream2; +use quote::{quote, ToTokens}; +use std::collections::HashSet; +use syn::{parse2, parse_quote, spanned::Spanned, Ident, ImplItem, ItemImpl, Path, Result, Token}; + +#[derive(Parse)] +pub struct DeriveImplAttrArgs { + pub default_impl_path: Path, + _as: Option, + #[parse_if(_as.is_some())] + pub disambiguation_path: Option, +} + +impl ForeignPath for DeriveImplAttrArgs { + fn foreign_path(&self) -> &Path { + &self.default_impl_path + } +} + +impl ToTokens for DeriveImplAttrArgs { + fn to_tokens(&self, tokens: &mut TokenStream2) { + tokens.extend(self.default_impl_path.to_token_stream()); + tokens.extend(self._as.to_token_stream()); + tokens.extend(self.disambiguation_path.to_token_stream()); + } +} + +/// Gets the [`Ident`] representation of the given [`ImplItem`], if one exists. Otherwise +/// returns [`None`]. +/// +/// Used by [`combine_impls`] to determine whether we can compare [`ImplItem`]s by [`Ident`] +/// or not. +fn impl_item_ident(impl_item: &ImplItem) -> Option<&Ident> { + match impl_item { + ImplItem::Const(item) => Some(&item.ident), + ImplItem::Fn(item) => Some(&item.sig.ident), + ImplItem::Type(item) => Some(&item.ident), + ImplItem::Macro(item) => item.mac.path.get_ident(), + _ => None, + } +} + +/// The real meat behind `derive_impl`. Takes in a `local_impl`, which is the impl for which we +/// want to implement defaults (i.e. the one the attribute macro is attached to), and a +/// `foreign_impl`, which is the impl containing the defaults we want to use, and returns an +/// [`ItemImpl`] containing the final generated impl. 
+/// +/// This process has the following caveats: +/// * Colliding items that have an ident are not copied into `local_impl` +/// * Uncolliding items that have an ident are copied into `local_impl` but are qualified as `type +/// #ident = <#default_impl_path as #disambiguation_path>::#ident;` +/// * Items that lack an ident are de-duplicated so only unique items that lack an ident are copied +/// into `local_impl`. Items that lack an ident and also exist verbatim in `local_impl` are not +/// copied over. +fn combine_impls( + local_impl: ItemImpl, + foreign_impl: ItemImpl, + default_impl_path: Path, + disambiguation_path: Path, +) -> ItemImpl { + let (existing_local_keys, existing_unsupported_items): (HashSet, HashSet) = + local_impl + .items + .iter() + .cloned() + .partition(|impl_item| impl_item_ident(impl_item).is_some()); + let existing_local_keys: HashSet = existing_local_keys + .into_iter() + .filter_map(|item| impl_item_ident(&item).cloned()) + .collect(); + let mut final_impl = local_impl; + let extended_items = foreign_impl.items.into_iter().filter_map(|item| { + if let Some(ident) = impl_item_ident(&item) { + if existing_local_keys.contains(&ident) { + // do not copy colliding items that have an ident + return None + } + if matches!(item, ImplItem::Type(_)) { + // modify and insert uncolliding type items + let modified_item: ImplItem = parse_quote! { + type #ident = <#default_impl_path as #disambiguation_path>::#ident; + }; + return Some(modified_item) + } + // copy uncolliding non-type items that have an ident + Some(item) + } else { + // do not copy colliding items that lack an ident + (!existing_unsupported_items.contains(&item)) + // copy uncolliding items without an ident verbatim + .then_some(item) + } + }); + final_impl.items.extend(extended_items); + final_impl +} + +/// Internal implementation behind [`#[derive_impl(..)]`](`macro@crate::derive_impl`). 
+/// +/// `default_impl_path`: the module path of the external `impl` statement whose tokens we are +/// importing via `macro_magic` +/// +/// `foreign_tokens`: the tokens for the external `impl` statement +/// +/// `local_tokens`: the tokens for the local `impl` statement this attribute is attached to +/// +/// `disambiguation_path`: the module path of the external trait we will use to qualify +/// defaults imported from the external `impl` statement +pub fn derive_impl( + default_impl_path: TokenStream2, + foreign_tokens: TokenStream2, + local_tokens: TokenStream2, + disambiguation_path: Option, +) -> Result { + let local_impl = parse2::(local_tokens)?; + let foreign_impl = parse2::(foreign_tokens)?; + let default_impl_path = parse2::(default_impl_path)?; + + // have disambiguation_path default to the item being impl'd in the foreign impl if we + // don't specify an `as [disambiguation_path]` in the macro attr + let disambiguation_path = match (disambiguation_path, foreign_impl.clone().trait_) { + (Some(disambiguation_path), _) => disambiguation_path, + (None, Some((_, foreign_impl_path, _))) => foreign_impl_path, + _ => + return Err(syn::Error::new( + foreign_impl.span(), + "Impl statement must have a defined type being implemented \ + for a defined type such as `impl A for B`", + )), + }; + + // generate the combined impl + let combined_impl = + combine_impls(local_impl, foreign_impl, default_impl_path, disambiguation_path); + + Ok(quote!(#combined_impl)) +} + +#[test] +fn test_derive_impl_attr_args_parsing() { + parse2::(quote!( + some::path::TestDefaultConfig as some::path::DefaultConfig + )) + .unwrap(); + parse2::(quote!( + frame_system::prelude::testing::TestDefaultConfig as DefaultConfig + )) + .unwrap(); + parse2::(quote!(Something as some::path::DefaultConfig)).unwrap(); + parse2::(quote!(Something as DefaultConfig)).unwrap(); + parse2::(quote!(DefaultConfig)).unwrap(); + assert!(parse2::(quote!()).is_err()); + assert!(parse2::(quote!(Config 
Config)).is_err()); +} diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 25df8410b02d1..2a46696ed4f70 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -20,26 +20,27 @@ #![recursion_limit = "512"] mod benchmark; -mod clone_no_bound; mod construct_runtime; mod crate_version; -mod debug_no_bound; -mod default_no_bound; +mod derive_impl; mod dummy_part_checker; mod key_prefix; mod match_and_insert; +mod no_bound; mod pallet; mod pallet_error; -mod partial_eq_no_bound; -mod storage; mod storage_alias; mod transactional; mod tt_macro; +use frame_support_procedural_tools::generate_crate_access_2018; +use macro_magic::import_tokens_attr; use proc_macro::TokenStream; -use quote::quote; +use quote::{quote, ToTokens}; use std::{cell::RefCell, str::FromStr}; -pub(crate) use storage::INHERENT_INSTANCE_NAME; +use syn::{parse_macro_input, Error, ItemImpl, ItemMod}; + +pub(crate) const INHERENT_INSTANCE_NAME: &str = "__InherentHiddenInstance"; thread_local! { /// A global counter, can be used to generate a relatively unique identifier. @@ -75,233 +76,6 @@ fn counter_prefix(prefix: &str) -> String { format!("CounterFor{}", prefix) } -/// Declares strongly-typed wrappers around codec-compatible types in storage. -/// -/// ## Example -/// -/// ```nocompile -/// decl_storage! { -/// trait Store for Module as Example { -/// Foo get(fn foo) config(): u32=12; -/// Bar: map hasher(identity) u32 => u32; -/// pub Zed build(|config| vec![(0, 0)]): map hasher(identity) u32 => u32; -/// } -/// } -/// ``` -/// -/// Declaration is set with the header `(pub) trait Store for Module as Example`, -/// with `Store` a (pub) trait generated associating each storage item to the `Module` and -/// `as Example` setting the prefix used for storage items of this module. `Example` must be unique: -/// another module with the same name and the same inner storage item name will conflict. 
-/// `Example` is called the module prefix. -/// -/// note: For instantiable modules the module prefix is prepended with instance -/// prefix. Instance prefix is "" for default instance and "Instance$n" for instance number $n. -/// Thus, instance 3 of module Example has a module prefix of `Instance3Example` -/// -/// Basic storage consists of a name and a type; supported types are: -/// -/// * Value: `Foo: type`: Implements the -/// [`StorageValue`](../frame_support/storage/trait.StorageValue.html) trait using the -/// [`StorageValue generator`](../frame_support/storage/generator/trait.StorageValue.html). -/// -/// The generator is implemented with: -/// * `module_prefix`: module_prefix -/// * `storage_prefix`: storage_name -/// -/// Thus the storage value is finally stored at: -/// ```nocompile -/// Twox128(module_prefix) ++ Twox128(storage_prefix) -/// ``` -/// -/// * Map: `Foo: map hasher($hash) type => type`: Implements the -/// [`StorageMap`](../frame_support/storage/trait.StorageMap.html) trait using the [`StorageMap -/// generator`](../frame_support/storage/generator/trait.StorageMap.html). And -/// [`StoragePrefixedMap`](../frame_support/storage/trait.StoragePrefixedMap.html). -/// -/// `$hash` representing a choice of hashing algorithms available in the -/// [`Hashable`](../frame_support/trait.Hashable.html) trait. You will generally want to use one -/// of three hashers: -/// * `blake2_128_concat`: The default, safe choice. Use if you are unsure or don't care. It is -/// secure against user-tainted keys, fairly fast and memory-efficient and supports iteration -/// over its keys and values. This must be used if the keys of your map can be selected *en -/// masse* by untrusted users. -/// * `twox_64_concat`: This is an insecure hasher and can only be used safely if you know that -/// the preimages cannot be chosen at will by untrusted users. It is memory-efficient, extremely -/// performant and supports iteration over its keys and values. 
You can safely use this is the -/// key is: -/// - A (slowly) incrementing index. -/// - Known to be the result of a cryptographic hash (though `identity` is a better choice -/// here). -/// - Known to be the public key of a cryptographic key pair in existence. -/// * `identity`: This is not a hasher at all, and just uses the key material directly. Since it -/// does no hashing or appending, it's the fastest possible hasher, however, it's also the least -/// secure. It can be used only if you know that the key will be cryptographically/securely -/// randomly distributed over the binary encoding space. In most cases this will not be true. -/// One case where it is true, however, if where the key is itself the result of a cryptographic -/// hash of some existent data. -/// -/// Other hashers will tend to be "opaque" and not support iteration over the keys in the -/// map. It is not recommended to use these. -/// -/// The generator is implemented with: -/// * `module_prefix`: $module_prefix -/// * `storage_prefix`: storage_name -/// * `Hasher`: $hash -/// -/// Thus the keys are stored at: -/// ```nocompile -/// twox128(module_prefix) ++ twox128(storage_prefix) ++ hasher(encode(key)) -/// ``` -/// -/// * Double map: `Foo: double_map hasher($hash1) u32, hasher($hash2) u32 => u32`: Implements the -/// [`StorageDoubleMap`](../frame_support/storage/trait.StorageDoubleMap.html) trait using the -/// [`StorageDoubleMap -/// generator`](../frame_support/storage/generator/trait.StorageDoubleMap.html). And -/// [`StoragePrefixedMap`](../frame_support/storage/trait.StoragePrefixedMap.html). -/// -/// `$hash1` and `$hash2` representing choices of hashing algorithms available in the -/// [`Hashable`](../frame_support/trait.Hashable.html) trait. They must be chosen with care, see -/// generator documentation. 
-/// -/// The generator is implemented with: -/// * `module_prefix`: $module_prefix -/// * `storage_prefix`: storage_name -/// * `Hasher1`: $hash1 -/// * `Hasher2`: $hash2 -/// -/// Thus keys are stored at: -/// ```nocompile -/// Twox128(module_prefix) ++ Twox128(storage_prefix) ++ Hasher1(encode(key1)) ++ -/// Hasher2(encode(key2)) ``` -/// -/// Supported hashers (ordered from least to best security): -/// -/// * `identity` - Just the unrefined key material. Use only when it is known to be a secure hash -/// already. The most efficient and iterable over keys. -/// * `twox_64_concat` - TwoX with 64bit + key concatenated. Use only when an untrusted source -/// cannot select and insert key values. Very efficient and iterable over keys. -/// * `blake2_128_concat` - Blake2 with 128bit + key concatenated. Slower but safe to use in all -/// circumstances. Iterable over keys. -/// -/// Deprecated hashers, which do not support iteration over keys include: -/// * `twox_128` - TwoX with 128bit. -/// * `twox_256` - TwoX with with 256bit. -/// * `blake2_128` - Blake2 with 128bit. -/// * `blake2_256` - Blake2 with 256bit. -/// -/// Basic storage can be extended as such: -/// -/// `#vis #name get(fn #getter) config(#field_name) build(#closure): #type = #default;` -/// -/// * `#vis`: Set the visibility of the structure. `pub` or nothing. -/// * `#name`: Name of the storage item, used as a prefix in storage. -/// * \[optional\] `get(fn #getter)`: Implements the function #getter to `Module`. -/// * \[optional\] `config(#field_name)`: `field_name` is optional if get is set. -/// Will include the item in `GenesisConfig`. -/// * \[optional\] `build(#closure)`: Closure called with storage overlays. -/// * \[optional\] `max_values(#expr)`: `expr` is an expression returning a `u32`. It is used to -/// implement `StorageInfoTrait`. Note this attribute is not available for storage value as the -/// maximum number of values is 1. -/// * `#type`: Storage type. 
-/// * \[optional\] `#default`: Value returned when none. -/// -/// Storage items are accessible in multiple ways: -/// -/// * The structure: `Foo` or `Foo::` depending if the value type is generic or not. -/// * The `Store` trait structure: ` as Store>::Foo` -/// * The getter on the module that calls get on the structure: `Module::::foo()` -/// -/// ## GenesisConfig -/// -/// An optional `GenesisConfig` struct for storage initialization can be defined, either -/// when at least one storage field requires default initialization -/// (both `get` and `config` or `build`), or specifically as in: -/// -/// ```nocompile -/// decl_storage! { -/// trait Store for Module as Example { -/// -/// // Your storage items -/// } -/// add_extra_genesis { -/// config(genesis_field): GenesisFieldType; -/// config(genesis_field2): GenesisFieldType; -/// ... -/// build(|_: &Self| { -/// // Modification of storage -/// }) -/// } -/// } -/// ``` -/// -/// This struct can be exposed as `ExampleConfig` by the `construct_runtime!` macro like follows: -/// -/// ```nocompile -/// construct_runtime!( -/// pub enum Runtime with ... { -/// ..., -/// Example: example::{Pallet, Storage, ..., Config}, -/// ..., -/// } -/// ); -/// ``` -/// -/// ### Module with Instances -/// -/// The `decl_storage!` macro supports building modules with instances with the following syntax -/// (`DefaultInstance` type is optional): -/// -/// ```nocompile -/// trait Store for Module, I: Instance=DefaultInstance> as Example {} -/// ``` -/// -/// Accessing the structure no requires the instance as generic parameter: -/// * `Foo::` if the value type is not generic -/// * `Foo::` if the value type is generic -/// -/// ## Where clause -/// -/// This macro supports a where clause which will be replicated to all generated types. 
-/// -/// ```nocompile -/// trait Store for Module as Example where T::AccountId: std::fmt::Display {} -/// ``` -/// -/// ## Limitations -/// -/// # Instancing and generic `GenesisConfig` -/// -/// If your module supports instancing and you see an error like `parameter `I` is never used` for -/// your `decl_storage!`, you are hitting a limitation of the current implementation. You probably -/// try to use an associated type of a non-instantiable trait. To solve this, add the following to -/// your macro call: -/// -/// ```nocompile -/// add_extra_genesis { -/// config(phantom): std::marker::PhantomData, -/// } -/// ``` -/// -/// This adds a field to your `GenesisConfig` with the name `phantom` that you can initialize with -/// `Default::default()`. -/// -/// ## PoV information -/// -/// To implement the trait `StorageInfoTrait` for storages an additional attribute can be used -/// `generate_storage_info`: -/// ```nocompile -/// decl_storage! { generate_storage_info -/// trait Store for ... -/// } -/// ``` -#[proc_macro] -#[deprecated(note = "Will be removed soon; use the attribute `#[pallet]` macro instead. - For more info, see: ")] -pub fn decl_storage(input: TokenStream) -> TokenStream { - storage::decl_storage_impl(input) -} - /// Construct a runtime, with the given name and the given pallets. /// /// The parameters here are specific types for `Block`, `NodeBlock`, and `UncheckedExtrinsic` @@ -464,6 +238,8 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { /// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By default, dev /// mode pallets will assume a weight of zero (`0`) if a weight is not specified. This is /// equivalent to specifying `#[weight(0)]` on all calls that do not specify a weight. +/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By +/// default, dev mode pallets will assume a call index based on the order of the call. 
/// * All storages are marked as unbounded, meaning you do not need to implement `MaxEncodedLen` on /// storage types. This is equivalent to specifying `#[pallet::unbounded]` on all storage type /// definitions. @@ -639,13 +415,13 @@ pub fn require_transactional(attr: TokenStream, input: TokenStream) -> TokenStre /// Derive [`Clone`] but do not bound any generic. Docs are at `frame_support::CloneNoBound`. #[proc_macro_derive(CloneNoBound)] pub fn derive_clone_no_bound(input: TokenStream) -> TokenStream { - clone_no_bound::derive_clone_no_bound(input) + no_bound::clone::derive_clone_no_bound(input) } /// Derive [`Debug`] but do not bound any generics. Docs are at `frame_support::DebugNoBound`. #[proc_macro_derive(DebugNoBound)] pub fn derive_debug_no_bound(input: TokenStream) -> TokenStream { - debug_no_bound::derive_debug_no_bound(input) + no_bound::debug::derive_debug_no_bound(input) } /// Derive [`Debug`], if `std` is enabled it uses `frame_support::DebugNoBound`, if `std` is not @@ -654,7 +430,7 @@ pub fn derive_debug_no_bound(input: TokenStream) -> TokenStream { #[proc_macro_derive(RuntimeDebugNoBound)] pub fn derive_runtime_debug_no_bound(input: TokenStream) -> TokenStream { if cfg!(any(feature = "std", feature = "try-runtime")) { - debug_no_bound::derive_debug_no_bound(input) + no_bound::debug::derive_debug_no_bound(input) } else { let input: syn::DeriveInput = match syn::parse(input) { Ok(input) => input, @@ -681,7 +457,7 @@ pub fn derive_runtime_debug_no_bound(input: TokenStream) -> TokenStream { /// `frame_support::PartialEqNoBound`. #[proc_macro_derive(PartialEqNoBound)] pub fn derive_partial_eq_no_bound(input: TokenStream) -> TokenStream { - partial_eq_no_bound::derive_partial_eq_no_bound(input) + no_bound::partial_eq::derive_partial_eq_no_bound(input) } /// derive Eq but do no bound any generic. Docs are at `frame_support::EqNoBound`. 
@@ -706,7 +482,7 @@ pub fn derive_eq_no_bound(input: TokenStream) -> TokenStream { /// derive `Default` but do no bound any generic. Docs are at `frame_support::DefaultNoBound`. #[proc_macro_derive(DefaultNoBound, attributes(default))] pub fn derive_default_no_bound(input: TokenStream) -> TokenStream { - default_no_bound::derive_default_no_bound(input) + no_bound::default::derive_default_no_bound(input) } #[proc_macro] @@ -771,12 +547,302 @@ pub fn __create_tt_macro(input: TokenStream) -> TokenStream { } #[proc_macro_attribute] -pub fn storage_alias(_: TokenStream, input: TokenStream) -> TokenStream { - storage_alias::storage_alias(input.into()) +pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream { + storage_alias::storage_alias(attributes.into(), input.into()) .unwrap_or_else(|r| r.into_compile_error()) .into() } +/// This attribute can be used to derive a full implementation of a trait based on a local partial +/// impl and an external impl containing defaults that can be overriden in the local impl. +/// +/// For a full end-to-end example, see [below](#use-case-auto-derive-test-pallet-config-traits). +/// +/// # Usage +/// +/// The attribute should be attached to an impl block (strictly speaking a `syn::ItemImpl`) for +/// which we want to inject defaults in the event of missing trait items in the block. +/// +/// The attribute minimally takes a single `default_impl_path` argument, which should be the module +/// path to an impl registered via [`#[register_default_impl]`](`macro@register_default_impl`) that +/// contains the default trait items we want to potentially inject, with the general form: +/// +/// ```ignore +/// #[derive_impl(default_impl_path)] +/// impl SomeTrait for SomeStruct { +/// ... 
+/// } +/// ``` +/// +/// Optionally, a `disambiguation_path` can be specified as follows by providing `as path::here` +/// after the `default_impl_path`: +/// +/// ```ignore +/// #[derive_impl(default_impl_path as disambiguation_path)] +/// impl SomeTrait for SomeStruct { +/// ... +/// } +/// ``` +/// +/// The `disambiguation_path`, if specified, should be the path to a trait that will be used to +/// qualify all default entries that are injected into the local impl. For example if your +/// `default_impl_path` is `some::path::TestTraitImpl` and your `disambiguation_path` is +/// `another::path::DefaultTrait`, any items injected into the local impl will be qualified as +/// `::specific_trait_item`. +/// +/// If you omit the `as disambiguation_path` portion, the `disambiguation_path` will internally +/// default to `A` from the `impl A for B` part of the default impl. This is useful for scenarios +/// where all of the relevant types are already in scope via `use` statements. +/// +/// Conversely, the `default_impl_path` argument is required and cannot be omitted. +/// +/// You can also make use of `#[pallet::no_default]` on specific items in your default impl that you +/// want to ensure will not be copied over but that you nonetheless want to use locally in the +/// context of the foreign impl and the pallet (or context) in which it is defined. +/// +/// ## Use-Case Example: Auto-Derive Test Pallet Config Traits +/// +/// The `#[derive_imp(..)]` attribute can be used to derive a test pallet `Config` based on an +/// existing pallet `Config` that has been marked with +/// [`#[pallet::config(with_default)]`](`macro@config`) (which under the hood, generates a +/// `DefaultConfig` trait in the pallet in which the macro was invoked). 
+/// +/// In this case, the `#[derive_impl(..)]` attribute should be attached to an `impl` block that +/// implements a compatible `Config` such as `frame_system::Config` for a test/mock runtime, and +/// should receive as its first argument the path to a `DefaultConfig` impl that has been registered +/// via [`#[register_default_impl]`](`macro@register_default_impl`), and as its second argument, the +/// path to the auto-generated `DefaultConfig` for the existing pallet `Config` we want to base our +/// test config off of. +/// +/// The following is what the `basic` example pallet would look like with a default testing config: +/// +/// ```ignore +/// #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::pallet::DefaultConfig)] +/// impl frame_system::Config for Test { +/// // These are all defined by system as mandatory. +/// type BaseCallFilter = frame_support::traits::Everything; +/// type RuntimeEvent = RuntimeEvent; +/// type RuntimeCall = RuntimeCall; +/// type RuntimeOrigin = RuntimeOrigin; +/// type OnSetCode = (); +/// type PalletInfo = PalletInfo; +/// type Block = Block; +/// // We decide to override this one. 
+/// type AccountData = pallet_balances::AccountData; +/// } +/// ``` +/// +/// where `TestDefaultConfig` was defined and registered as follows: +/// +/// ```ignore +/// pub struct TestDefaultConfig; +/// +/// #[register_default_impl(TestDefaultConfig)] +/// impl DefaultConfig for TestDefaultConfig { +/// type Version = (); +/// type BlockWeights = (); +/// type BlockLength = (); +/// type DbWeight = (); +/// type Nonce = u64; +/// type BlockNumber = u64; +/// type Hash = sp_core::hash::H256; +/// type Hashing = sp_runtime::traits::BlakeTwo256; +/// type AccountId = AccountId; +/// type Lookup = IdentityLookup; +/// type BlockHashCount = frame_support::traits::ConstU64<10>; +/// type AccountData = u32; +/// type OnNewAccount = (); +/// type OnKilledAccount = (); +/// type SystemWeightInfo = (); +/// type SS58Prefix = (); +/// type MaxConsumers = frame_support::traits::ConstU32<16>; +/// } +/// ``` +/// +/// The above call to `derive_impl` would expand to roughly the following: +/// +/// ```ignore +/// impl frame_system::Config for Test { +/// use frame_system::config_preludes::TestDefaultConfig; +/// use frame_system::pallet::DefaultConfig; +/// +/// type BaseCallFilter = frame_support::traits::Everything; +/// type RuntimeEvent = RuntimeEvent; +/// type RuntimeCall = RuntimeCall; +/// type RuntimeOrigin = RuntimeOrigin; +/// type OnSetCode = (); +/// type PalletInfo = PalletInfo; +/// type Block = Block; +/// type AccountData = pallet_balances::AccountData; +/// type Version = ::Version; +/// type BlockWeights = ::BlockWeights; +/// type BlockLength = ::BlockLength; +/// type DbWeight = ::DbWeight; +/// type Nonce = ::Nonce; +/// type BlockNumber = ::BlockNumber; +/// type Hash = ::Hash; +/// type Hashing = ::Hashing; +/// type AccountId = ::AccountId; +/// type Lookup = ::Lookup; +/// type BlockHashCount = ::BlockHashCount; +/// type OnNewAccount = ::OnNewAccount; +/// type OnKilledAccount = ::OnKilledAccount; +/// type SystemWeightInfo = ::SystemWeightInfo; +/// 
type SS58Prefix = ::SS58Prefix; +/// type MaxConsumers = ::MaxConsumers; +/// } +/// ``` +/// +/// You can then use the resulting `Test` config in test scenarios. +/// +/// Note that items that are _not_ present in our local `DefaultConfig` are automatically copied +/// from the foreign trait (in this case `TestDefaultConfig`) into the local trait impl (in this +/// case `Test`), unless the trait item in the local trait impl is marked with +/// [`#[pallet::no_default]`](`macro@no_default`), in which case it cannot be overridden, and any +/// attempts to do so will result in a compiler error. +/// +/// See `frame/examples/default-config/tests.rs` for a runnable end-to-end example pallet that makes +/// use of `derive_impl` to derive its testing config. +/// +/// See [here](`macro@config`) for more information and caveats about the auto-generated +/// `DefaultConfig` trait. +/// +/// ## Optional Conventions +/// +/// Note that as an optional convention, we encourage creating a `config_preludes` module inside of +/// your pallet. This is the convention we follow for `frame_system`'s `TestDefaultConfig` which, as +/// shown above, is located at `frame_system::config_preludes::TestDefaultConfig`. This is just a +/// suggested convention -- there is nothing in the code that expects modules with these names to be +/// in place, so there is no imperative to follow this pattern unless desired. +/// +/// In `config_preludes`, you can place types named like: +/// +/// * `TestDefaultConfig` +/// * `ParachainDefaultConfig` +/// * `SolochainDefaultConfig` +/// +/// Signifying in which context they can be used. +/// +/// # Advanced Usage +/// +/// ## Expansion +/// +/// The `#[derive_impl(default_impl_path as disambiguation_path)]` attribute will expand to the +/// local impl, with any extra items from the foreign impl that aren't present in the local impl +/// also included. 
In the case of a colliding trait item, the version of the item that exists in the +/// local impl will be retained. All imported items are qualified by the `disambiguation_path`, as +/// discussed above. +/// +/// ## Handling of Unnamed Trait Items +/// +/// Items that lack a `syn::Ident` for whatever reason are first checked to see if they exist, +/// verbatim, in the local/destination trait before they are copied over, so you should not need to +/// worry about collisions between identical unnamed items. +#[import_tokens_attr { + format!( + "{}::macro_magic", + match generate_crate_access_2018("frame-support") { + Ok(path) => Ok(path), + Err(_) => generate_crate_access_2018("frame"), + } + .expect("Failed to find either `frame-support` or `frame` in `Cargo.toml` dependencies.") + .to_token_stream() + .to_string() + ) +}] +#[with_custom_parsing(derive_impl::DeriveImplAttrArgs)] +#[proc_macro_attribute] +pub fn derive_impl(attrs: TokenStream, input: TokenStream) -> TokenStream { + let custom_attrs = parse_macro_input!(__custom_tokens as derive_impl::DeriveImplAttrArgs); + derive_impl::derive_impl( + __source_path.into(), + attrs.into(), + input.into(), + custom_attrs.disambiguation_path, + ) + .unwrap_or_else(|r| r.into_compile_error()) + .into() +} + +/// The optional attribute `#[pallet::no_default]` can be attached to trait items within a +/// `Config` trait impl that has [`#[pallet::config(with_default)]`](`macro@config`) attached. +/// +/// Attaching this attribute to a trait item ensures that that trait item will not be used as a +/// default with the [`#[derive_impl(..)]`](`macro@derive_impl`) attribute macro. +#[proc_macro_attribute] +pub fn no_default(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// Attach this attribute to an impl statement that you want to use with +/// [`#[derive_impl(..)]`](`macro@derive_impl`). +/// +/// You must also provide an identifier/name as the attribute's argument. 
This is the name you +/// must provide to [`#[derive_impl(..)]`](`macro@derive_impl`) when you import this impl via +/// the `default_impl_path` argument. This name should be unique at the crate-level. +/// +/// ## Example +/// +/// ```ignore +/// pub struct ExampleTestDefaultConfig; +/// +/// #[register_default_impl(ExampleTestDefaultConfig)] +/// impl DefaultConfig for ExampleTestDefaultConfig { +/// type Version = (); +/// type BlockWeights = (); +/// type BlockLength = (); +/// ... +/// type SS58Prefix = (); +/// type MaxConsumers = frame_support::traits::ConstU32<16>; +/// } +/// ``` +/// +/// ## Advanced Usage +/// +/// This macro acts as a thin wrapper around macro_magic's `#[export_tokens]`. See the docs +/// [here](https://docs.rs/macro_magic/latest/macro_magic/attr.export_tokens.html) for more +/// info. +/// +/// There are some caveats when applying a `use` statement to bring a +/// `#[register_default_impl]` item into scope. If you have a `#[register_default_impl]` +/// defined in `my_crate::submodule::MyItem`, it is currently not sufficient to do something +/// like: +/// +/// ```ignore +/// use my_crate::submodule::MyItem; +/// #[derive_impl(MyItem as Whatever)] +/// ``` +/// +/// This will fail with a mysterious message about `__export_tokens_tt_my_item` not being +/// defined. 
+/// +/// You can, however, do any of the following: +/// ```ignore +/// // partial path works +/// use my_crate::submodule; +/// #[derive_impl(submodule::MyItem as Whatever)] +/// ``` +/// ```ignore +/// // full path works +/// #[derive_impl(my_crate::submodule::MyItem as Whatever)] +/// ``` +/// ```ignore +/// // wild-cards work +/// use my_crate::submodule::*; +/// #[derive_impl(MyItem as Whatever)] +/// ``` +#[proc_macro_attribute] +pub fn register_default_impl(attrs: TokenStream, tokens: TokenStream) -> TokenStream { + // ensure this is an impl statement + let item_impl = syn::parse_macro_input!(tokens as ItemImpl); + + // internally wrap macro_magic's `#[export_tokens]` macro + match macro_magic::mm_core::export_tokens_internal(attrs, item_impl.to_token_stream(), true) { + Ok(tokens) => tokens.into(), + Err(err) => err.to_compile_error().into(), + } +} + /// Used internally to decorate pallet attribute macro stubs when they are erroneously used /// outside of a pallet module fn pallet_macro_stub() -> TokenStream { @@ -809,6 +875,52 @@ fn pallet_macro_stub() -> TokenStream { /// /// [`pallet::event`](`macro@event`) must be present if `RuntimeEvent` exists as a config item /// in your `#[pallet::config]`. +/// +/// ## Optional: `with_default` +/// +/// An optional `with_default` argument may also be specified. Doing so will automatically +/// generate a `DefaultConfig` trait inside your pallet which is suitable for use with +/// [`#[derive_impl(..)]`](`macro@derive_impl`) to derive a default testing config: +/// +/// ```ignore +/// #[pallet::config(with_default)] +/// pub trait Config: frame_system::Config { +/// type RuntimeEvent: Parameter +/// + Member +/// + From> +/// + Debug +/// + IsType<::RuntimeEvent>; +/// +/// #[pallet::no_default] +/// type BaseCallFilter: Contains; +/// // ... 
+/// } +/// ``` +/// +/// As shown above, you may also attach the [`#[pallet::no_default]`](`macro@no_default`) +/// attribute to specify that a particular trait item _cannot_ be used as a default when a test +/// `Config` is derived using the [`#[derive_impl(..)]`](`macro@derive_impl`) attribute macro. +/// This will cause that particular trait item to simply not appear in default testing configs +/// based on this config (the trait item will not be included in `DefaultConfig`). +/// +/// ### `DefaultConfig` Caveats +/// +/// The auto-generated `DefaultConfig` trait: +/// - is always a _subset_ of your pallet's `Config` trait. +/// - can only contain items that don't rely on externalities, such as `frame_system::Config`. +/// +/// Trait items that _do_ rely on externalities should be marked with +/// [`#[pallet::no_default]`](`macro@no_default`) +/// +/// Consequently: +/// - Any items that rely on externalities _must_ be marked with +/// [`#[pallet::no_default]`](`macro@no_default`) or your trait will fail to compile when used +/// with [`derive_impl`](`macro@derive_impl`). +/// - Items marked with [`#[pallet::no_default]`](`macro@no_default`) are entirely excluded from the +/// `DefaultConfig` trait, and therefore any impl of `DefaultConfig` doesn't need to implement +/// such items. +/// +/// For more information, see [`macro@derive_impl`]. 
#[proc_macro_attribute] pub fn config(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1217,7 +1329,7 @@ pub fn unbounded(_: TokenStream, _: TokenStream) -> TokenStream { /// ```ignore /// #[pallet::storage] /// #[pallet::whitelist_storage] -/// pub(super) type Number = StorageValue<_, T::BlockNumber, ValueQuery>; +/// pub(super) type Number = StorageValue<_, frame_system::pallet_prelude::BlockNumberFor::, ValueQuery>; /// ``` /// /// NOTE: As with all `pallet::*` attributes, this one _must_ be written as @@ -1309,8 +1421,7 @@ pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { /// The macro will add the following attribute: /// * `#[cfg(feature = "std")]` /// -/// The macro will implement `sp_runtime::BuildModuleGenesisStorage` using `()` as a second -/// generic for non-instantiable pallets. +/// The macro will implement `sp_runtime::BuildStorage`. #[proc_macro_attribute] pub fn genesis_build(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1421,3 +1532,113 @@ pub fn origin(_: TokenStream, _: TokenStream) -> TokenStream { pub fn composite_enum(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } + +/// Can be attached to a module. Doing so will declare that module as importable into a pallet +/// via [`#[import_section]`](`macro@import_section`). +/// +/// Note that sections are imported by their module name/ident, and should be referred to by +/// their _full path_ from the perspective of the target pallet. Do not attempt to make use +/// of `use` statements to bring pallet sections into scope, as this will not work (unless +/// you do so as part of a wildcard import, in which case it will work). +/// +/// ## Naming Logistics +/// +/// Also note that because of how `#[pallet_section]` works, pallet section names must be +/// globally unique _within the crate in which they are defined_. 
For more information on +/// why this must be the case, see macro_magic's +/// [`#[export_tokens]`](https://docs.rs/macro_magic/latest/macro_magic/attr.export_tokens.html) macro. +/// +/// Optionally, you may provide an argument to `#[pallet_section]` such as +/// `#[pallet_section(some_ident)]`, in the event that there is another pallet section in +/// the same crate with the same ident/name. The ident you specify can then be used instead of +/// the module's ident name when you go to import it via `#[import_section]`. +#[proc_macro_attribute] +pub fn pallet_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { + let tokens_clone = tokens.clone(); + // ensure this can only be attached to a module + let _mod = parse_macro_input!(tokens_clone as ItemMod); + + // use macro_magic's export_tokens as the internal implementation otherwise + match macro_magic::mm_core::export_tokens_internal(attr, tokens, false) { + Ok(tokens) => tokens.into(), + Err(err) => err.to_compile_error().into(), + } +} + +/// An attribute macro that can be attached to a module declaration. Doing so will +/// import the contents of the specified external pallet section that was defined +/// previously using [`#[pallet_section]`](`macro@pallet_section`). +/// +/// ## Example +/// ```ignore +/// #[import_section(some_section)] +/// #[pallet] +/// pub mod pallet { +/// // ... +/// } +/// ``` +/// where `some_section` was defined elsewhere via: +/// ```ignore +/// #[pallet_section] +/// pub mod some_section { +/// // ... +/// } +/// ``` +/// +/// This will result in the contents of `some_section` being _verbatim_ imported into +/// the pallet above. 
Note that since the tokens for `some_section` are essentially +/// copy-pasted into the target pallet, you cannot refer to imports that don't also +/// exist in the target pallet, but this is easily resolved by including all relevant +/// `use` statements within your pallet section, so they are imported as well, or by +/// otherwise ensuring that you have the same imports on the target pallet. +/// +/// It is perfectly permissible to import multiple pallet sections into the same pallet, +/// which can be done by having multiple `#[import_section(something)]` attributes +/// attached to the pallet. +/// +/// Note that sections are imported by their module name/ident, and should be referred to by +/// their _full path_ from the perspective of the target pallet. +#[import_tokens_attr { + format!( + "{}::macro_magic", + match generate_crate_access_2018("frame-support") { + Ok(path) => Ok(path), + Err(_) => generate_crate_access_2018("frame"), + } + .expect("Failed to find either `frame-support` or `frame` in `Cargo.toml` dependencies.") + .to_token_stream() + .to_string() + ) +}] +#[proc_macro_attribute] +pub fn import_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { + let foreign_mod = parse_macro_input!(attr as ItemMod); + let mut internal_mod = parse_macro_input!(tokens as ItemMod); + + // check that internal_mod is a pallet module + if !internal_mod.attrs.iter().any(|attr| { + if let Some(last_seg) = attr.path().segments.last() { + last_seg.ident == "pallet" + } else { + false + } + }) { + return Error::new( + internal_mod.ident.span(), + "`#[import_section]` can only be applied to a valid pallet module", + ) + .to_compile_error() + .into() + } + + if let Some(ref mut content) = internal_mod.content { + if let Some(foreign_content) = foreign_mod.content { + content.1.extend(foreign_content.1); + } + } + + quote! 
{ + #internal_mod + } + .into() +} diff --git a/frame/support/procedural/src/clone_no_bound.rs b/frame/support/procedural/src/no_bound/clone.rs similarity index 100% rename from frame/support/procedural/src/clone_no_bound.rs rename to frame/support/procedural/src/no_bound/clone.rs diff --git a/frame/support/procedural/src/debug_no_bound.rs b/frame/support/procedural/src/no_bound/debug.rs similarity index 100% rename from frame/support/procedural/src/debug_no_bound.rs rename to frame/support/procedural/src/no_bound/debug.rs diff --git a/frame/support/procedural/src/default_no_bound.rs b/frame/support/procedural/src/no_bound/default.rs similarity index 100% rename from frame/support/procedural/src/default_no_bound.rs rename to frame/support/procedural/src/no_bound/default.rs diff --git a/frame/support/procedural/src/no_bound/mod.rs b/frame/support/procedural/src/no_bound/mod.rs new file mode 100644 index 0000000000000..2f76b01726150 --- /dev/null +++ b/frame/support/procedural/src/no_bound/mod.rs @@ -0,0 +1,23 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Derive macros to derive traits without bounding generic parameters. 
+ +pub mod clone; +pub mod debug; +pub mod default; +pub mod partial_eq; diff --git a/frame/support/procedural/src/partial_eq_no_bound.rs b/frame/support/procedural/src/no_bound/partial_eq.rs similarity index 100% rename from frame/support/procedural/src/partial_eq_no_bound.rs rename to frame/support/procedural/src/no_bound/partial_eq.rs diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index f17fdc81a647c..aa6a8e09f5250 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -16,7 +16,10 @@ // limitations under the License. use crate::{ - pallet::{parse::call::CallWeightDef, Def}, + pallet::{ + parse::call::{CallVariantDef, CallWeightDef}, + Def, + }, COUNTER, }; use proc_macro2::TokenStream as TokenStream2; @@ -113,7 +116,22 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { } debug_assert_eq!(fn_weight.len(), methods.len()); - let fn_doc = methods.iter().map(|method| &method.docs).collect::>(); + let map_fn_docs = if !def.dev_mode { + // Emit the [`Pallet::method`] documentation only for non-dev modes. + |method: &CallVariantDef| { + let reference = format!("See [`Pallet::{}`].", method.name); + quote!(#reference) + } + } else { + // For the dev-mode do not provide a documentation link as it will break the + // `cargo doc` if the pallet is private inside a test. + |method: &CallVariantDef| { + let reference = format!("See `Pallet::{}`.", method.name); + quote!(#reference) + } + }; + + let fn_doc = methods.iter().map(map_fn_docs).collect::>(); let args_name = methods .iter() @@ -175,9 +193,8 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { .collect::>() }); - let default_docs = [syn::parse_quote!( - r"Contains one variant per dispatchable that can be called by an extrinsic." 
- )]; + let default_docs = + [syn::parse_quote!(r"Contains a variant per dispatchable extrinsic that this pallet has.")]; let docs = if docs.is_empty() { &default_docs[..] } else { &docs[..] }; let maybe_compile_error = if def.call.is_none() { @@ -274,7 +291,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::Never, ), #( - #( #[doc = #fn_doc] )* + #[doc = #fn_doc] #[codec(index = #call_index)] #fn_name { #( @@ -334,10 +351,6 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { } } - // Deprecated, but will warn when used - #[allow(deprecated)] - impl<#type_impl_gen> #frame_support::weights::GetDispatchInfo for #call_ident<#type_use_gen> #where_clause {} - impl<#type_impl_gen> #frame_support::dispatch::GetCallName for #call_ident<#type_use_gen> #where_clause { diff --git a/frame/support/procedural/src/pallet/expand/config.rs b/frame/support/procedural/src/pallet/expand/config.rs index c70f6eb80422a..c1b8eb022471f 100644 --- a/frame/support/procedural/src/pallet/expand/config.rs +++ b/frame/support/procedural/src/pallet/expand/config.rs @@ -16,30 +16,66 @@ // limitations under the License. use crate::pallet::Def; -use frame_support_procedural_tools::get_doc_literals; +use proc_macro2::TokenStream; +use quote::quote; +use syn::{parse_quote, Item}; /// /// * Generate default rust doc -pub fn expand_config(def: &mut Def) -> proc_macro2::TokenStream { +pub fn expand_config(def: &mut Def) -> TokenStream { let config = &def.config; let config_item = { let item = &mut def.item.content.as_mut().expect("Checked by def parser").1[config.index]; - if let syn::Item::Trait(item) = item { + if let Item::Trait(item) = item { item } else { unreachable!("Checked by config parser") } }; - if get_doc_literals(&config_item.attrs).is_empty() { - config_item.attrs.push(syn::parse_quote!( + config_item.attrs.insert( + 0, + parse_quote!( #[doc = r" - Configuration trait of this pallet. +Configuration trait of this pallet. 
- Implement this type for a runtime in order to customize this pallet. - "] - )); - } +The main purpose of this trait is to act as an interface between this pallet and the runtime in +which it is embedded. A type, function, or constant in this trait is essentially left to be +configured by the runtime that includes this pallet. + +Consequently, a runtime that wants to include this pallet must implement this trait." + ] + ), + ); + + // we only emit `DefaultConfig` if there are trait items, so an empty `DefaultConfig` is + // consequently impossible. + match &config.default_sub_trait { + Some(default_sub_trait) if default_sub_trait.items.len() > 0 => { + let trait_items = &default_sub_trait.items; - Default::default() + let type_param_bounds = if default_sub_trait.has_system { + let system = &def.frame_system; + quote::quote!(: #system::DefaultConfig) + } else { + quote::quote!() + }; + + quote!( + /// Based on [`Config`]. Auto-generated by + /// [`#[pallet::config(with_default)]`](`frame_support::pallet_macros::config`). + /// Can be used in tandem with + /// [`#[register_default_impl]`](`frame_support::register_default_impl`) and + /// [`#[derive_impl]`](`frame_support::derive_impl`) to derive test config traits + /// based on existing pallet config traits in a safe and developer-friendly way. + /// + /// See [here](`frame_support::pallet_macros::config`) for more information and caveats about + /// the auto-generated `DefaultConfig` trait and how it is generated. 
+ pub trait DefaultConfig #type_param_bounds { + #(#trait_items)* + } + ) + }, + _ => Default::default(), + } } diff --git a/frame/support/procedural/src/pallet/expand/doc_only.rs b/frame/support/procedural/src/pallet/expand/doc_only.rs index 32c9329f29498..50afeb3ca88cf 100644 --- a/frame/support/procedural/src/pallet/expand/doc_only.rs +++ b/frame/support/procedural/src/pallet/expand/doc_only.rs @@ -20,8 +20,6 @@ use proc_macro2::Span; use crate::pallet::Def; pub fn expand_doc_only(def: &mut Def) -> proc_macro2::TokenStream { - let storage_names = def.storages.iter().map(|storage| &storage.ident); - let storage_docs = def.storages.iter().map(|storage| &storage.docs); let dispatchables = if let Some(call_def) = &def.call { let type_impl_generics = def.type_impl_generics(Span::call_site()); call_def @@ -35,17 +33,16 @@ pub fn expand_doc_only(def: &mut Def) -> proc_macro2::TokenStream { .map(|(_, arg_name, arg_type)| quote::quote!( #arg_name: #arg_type, )) .collect::(); let docs = &method.docs; - let line_2 = - format!(" designed to document the [`{}`][`Call::{}`] variant of", name, name); + + let real = format!(" [`Pallet::{}`].", name); quote::quote!( #( #[doc = #docs] )* /// - /// --- + /// # Warning: Doc-Only /// - /// NOTE: This function is an automatically generated, doc only, uncallable stub. - #[ doc = #line_2 ] - /// the pallet [`Call`] enum. You should not attempt to call this function - /// directly. + /// This function is an automatically generated, and is doc-only, uncallable + /// stub. 
See the real version in + #[ doc = #real ] pub fn #name<#type_impl_generics>(#args) { unreachable!(); } ) }) @@ -54,22 +51,49 @@ pub fn expand_doc_only(def: &mut Def) -> proc_macro2::TokenStream { quote::quote!() }; + let storage_types = def + .storages + .iter() + .map(|storage| { + let storage_name = &storage.ident; + let storage_type_docs = &storage.docs; + let real = format!("[`pallet::{}`].", storage_name); + quote::quote!( + #( #[doc = #storage_type_docs] )* + /// + /// # Warning: Doc-Only + /// + /// This type is automatically generated, and is doc-only. See the real version in + #[ doc = #real ] + pub struct #storage_name(); + ) + }) + .collect::(); + quote::quote!( - /// Auto-generated docs-only module listing all defined storage types for this pallet. - /// Note that members of this module cannot be used directly and are only provided for - /// documentation purposes. + /// Auto-generated docs-only module listing all (public and private) defined storage types + /// for this pallet. + /// + /// # Warning: Doc-Only + /// + /// Members of this module cannot be used directly and are only provided for documentation + /// purposes. + /// + /// To see the actual storage type, find a struct with the same name at the root of the + /// pallet, in the list of [*Type Definitions*](../index.html#types). #[cfg(doc)] pub mod storage_types { use super::*; - #( - #( #[doc = #storage_docs] )* - pub struct #storage_names(); - )* + #storage_types } /// Auto-generated docs-only module listing all defined dispatchables for this pallet. - /// Note that members of this module cannot be used directly and are only provided for - /// documentation purposes. + /// + /// # Warning: Doc-Only + /// + /// Members of this module cannot be used directly and are only provided for documentation + /// purposes. To see the real version of each dispatchable, look for them in [`Pallet`] or + /// [`Call`]. 
#[cfg(doc)] pub mod dispatchables { use super::*; diff --git a/frame/support/procedural/src/pallet/expand/error.rs b/frame/support/procedural/src/pallet/expand/error.rs index 70f9fdfc71112..376a6a9f51c6d 100644 --- a/frame/support/procedural/src/pallet/expand/error.rs +++ b/frame/support/procedural/src/pallet/expand/error.rs @@ -110,10 +110,7 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&error_item.attrs).is_empty() { error_item.attrs.push(syn::parse_quote!( - #[doc = r" - Custom [dispatch errors](https://docs.substrate.io/main-docs/build/events-errors/) - of this pallet. - "] + #[doc = "The `Error` enum of this pallet."] )); } diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs index 2f0cefb8b9fc3..f94bdef332d9d 100644 --- a/frame/support/procedural/src/pallet/expand/event.rs +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -97,12 +97,9 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { } if get_doc_literals(&event_item.attrs).is_empty() { - event_item.attrs.push(syn::parse_quote!( - #[doc = r" - The [event](https://docs.substrate.io/main-docs/build/events-errors/) emitted - by this pallet. - "] - )); + event_item + .attrs + .push(syn::parse_quote!(#[doc = "The `Event` enum of this pallet"])); } // derive some traits because system event require Clone, FullCodec, Eq, PartialEq and Debug diff --git a/frame/support/procedural/src/pallet/expand/genesis_build.rs b/frame/support/procedural/src/pallet/expand/genesis_build.rs index 9447154f386dd..f54d308fce304 100644 --- a/frame/support/procedural/src/pallet/expand/genesis_build.rs +++ b/frame/support/procedural/src/pallet/expand/genesis_build.rs @@ -18,8 +18,7 @@ use crate::pallet::Def; /// -/// * implement the trait `sp_runtime::BuildModuleGenesisStorage` -/// * add #[cfg(feature = "std")] to GenesisBuild implementation. 
+/// * implement the trait `sp_runtime::BuildStorage` pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { let genesis_config = if let Some(genesis_config) = &def.genesis_config { genesis_config @@ -29,44 +28,22 @@ pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { let genesis_build = def.genesis_build.as_ref().expect("Checked by def parser"); let frame_support = &def.frame_support; - let type_impl_gen = &def.type_impl_generics(genesis_build.attr_span); - let type_use_gen = &def.type_use_generics(genesis_build.attr_span); - let trait_use_gen = if def.config.has_instance { - quote::quote_spanned!(genesis_build.attr_span => T, I) - } else { - // `__InherentHiddenInstance` used by construct_runtime here is alias for `()` - quote::quote_spanned!(genesis_build.attr_span => T, ()) - }; + let type_impl_gen = &genesis_config.gen_kind.type_impl_gen(genesis_build.attr_span); let gen_cfg_ident = &genesis_config.genesis_config; + let gen_cfg_use_gen = &genesis_config.gen_kind.type_use_gen(genesis_build.attr_span); - let gen_cfg_use_gen = genesis_config.gen_kind.type_use_gen(genesis_build.attr_span); - - let genesis_build_item = - &mut def.item.content.as_mut().expect("Checked by def parser").1[genesis_build.index]; - - let genesis_build_item_impl = if let syn::Item::Impl(impl_) = genesis_build_item { - impl_ - } else { - unreachable!("Checked by genesis_build parser") - }; - - genesis_build_item_impl.attrs.push(syn::parse_quote!( #[cfg(feature = "std")] )); let where_clause = &genesis_build.where_clause; quote::quote_spanned!(genesis_build.attr_span => #[cfg(feature = "std")] - impl<#type_impl_gen> #frame_support::sp_runtime::BuildModuleGenesisStorage<#trait_use_gen> - for #gen_cfg_ident<#gen_cfg_use_gen> #where_clause - { - fn build_module_genesis_storage( - &self, - storage: &mut #frame_support::sp_runtime::Storage, - ) -> std::result::Result<(), std::string::String> { - 
#frame_support::BasicExternalities::execute_with_storage(storage, || { - >::build(self); - Ok(()) - }) + impl<#type_impl_gen> #frame_support::sp_runtime::BuildStorage for #gen_cfg_ident<#gen_cfg_use_gen> #where_clause + { + fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> std::result::Result<(), std::string::String> { + #frame_support::BasicExternalities::execute_with_storage(storage, || { + self.build(); + Ok(()) + }) + } } - } ) } diff --git a/frame/support/procedural/src/pallet/expand/genesis_config.rs b/frame/support/procedural/src/pallet/expand/genesis_config.rs index de46afecf3d01..cbe47bd8505f6 100644 --- a/frame/support/procedural/src/pallet/expand/genesis_config.rs +++ b/frame/support/procedural/src/pallet/expand/genesis_config.rs @@ -95,23 +95,13 @@ pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { )); } attrs.push(syn::parse_quote!( - #[cfg_attr(feature = "std", derive(#frame_support::Serialize, #frame_support::Deserialize))] + #[derive(#frame_support::Serialize, #frame_support::Deserialize)] )); - attrs.push( - syn::parse_quote!( #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] ), - ); - attrs.push( - syn::parse_quote!( #[cfg_attr(feature = "std", serde(deny_unknown_fields))] ), - ); - attrs.push( - syn::parse_quote!( #[cfg_attr(feature = "std", serde(bound(serialize = "")))] ), - ); - attrs.push( - syn::parse_quote!( #[cfg_attr(feature = "std", serde(bound(deserialize = "")))] ), - ); - attrs.push( - syn::parse_quote!( #[cfg_attr(feature = "std", serde(crate = #serde_crate))] ), - ); + attrs.push(syn::parse_quote!( #[serde(rename_all = "camelCase")] )); + attrs.push(syn::parse_quote!( #[serde(deny_unknown_fields)] )); + attrs.push(syn::parse_quote!( #[serde(bound(serialize = ""))] )); + attrs.push(syn::parse_quote!( #[serde(bound(deserialize = ""))] )); + attrs.push(syn::parse_quote!( #[serde(crate = #serde_crate)] )); }, _ => unreachable!("Checked by genesis_config parser"), } diff --git 
a/frame/support/procedural/src/pallet/expand/hooks.rs b/frame/support/procedural/src/pallet/expand/hooks.rs index d7d8bbded95d6..d2d2b2967fafb 100644 --- a/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/frame/support/procedural/src/pallet/expand/hooks.rs @@ -75,7 +75,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { let frame_system = &def.frame_system; quote::quote! { impl<#type_impl_gen> - #frame_support::traits::Hooks<::BlockNumber> + #frame_support::traits::Hooks<#frame_system::pallet_prelude::BlockNumberFor::> for #pallet_ident<#type_use_gen> #where_clause {} } } else { @@ -105,7 +105,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { current_version, ); - return Err("On chain and current storage version do not match. Missing runtime upgrade?"); + return Err("On chain and current storage version do not match. Missing runtime upgrade?".into()); } } } else { @@ -128,7 +128,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { ); return Err("On chain storage version set, while the pallet doesn't \ - have the `#[pallet::storage_version(VERSION)]` attribute."); + have the `#[pallet::storage_version(VERSION)]` attribute.".into()); } } }; @@ -137,50 +137,50 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { #hooks_impl impl<#type_impl_gen> - #frame_support::traits::OnFinalize<::BlockNumber> + #frame_support::traits::OnFinalize<#frame_system::pallet_prelude::BlockNumberFor::> for #pallet_ident<#type_use_gen> #where_clause { - fn on_finalize(n: ::BlockNumber) { + fn on_finalize(n: #frame_system::pallet_prelude::BlockNumberFor::) { #frame_support::sp_tracing::enter_span!( #frame_support::sp_tracing::trace_span!("on_finalize") ); < Self as #frame_support::traits::Hooks< - ::BlockNumber + #frame_system::pallet_prelude::BlockNumberFor:: > >::on_finalize(n) } } impl<#type_impl_gen> - #frame_support::traits::OnIdle<::BlockNumber> + 
#frame_support::traits::OnIdle<#frame_system::pallet_prelude::BlockNumberFor::> for #pallet_ident<#type_use_gen> #where_clause { fn on_idle( - n: ::BlockNumber, + n: #frame_system::pallet_prelude::BlockNumberFor::, remaining_weight: #frame_support::weights::Weight ) -> #frame_support::weights::Weight { < Self as #frame_support::traits::Hooks< - ::BlockNumber + #frame_system::pallet_prelude::BlockNumberFor:: > >::on_idle(n, remaining_weight) } } impl<#type_impl_gen> - #frame_support::traits::OnInitialize<::BlockNumber> + #frame_support::traits::OnInitialize<#frame_system::pallet_prelude::BlockNumberFor::> for #pallet_ident<#type_use_gen> #where_clause { fn on_initialize( - n: ::BlockNumber + n: #frame_system::pallet_prelude::BlockNumberFor:: ) -> #frame_support::weights::Weight { #frame_support::sp_tracing::enter_span!( #frame_support::sp_tracing::trace_span!("on_initialize") ); < Self as #frame_support::traits::Hooks< - ::BlockNumber + #frame_system::pallet_prelude::BlockNumberFor:: > >::on_initialize(n) } @@ -205,40 +205,40 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { < Self as #frame_support::traits::Hooks< - ::BlockNumber + #frame_system::pallet_prelude::BlockNumberFor:: > >::on_runtime_upgrade() } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<#frame_support::sp_std::vec::Vec, &'static str> { + fn pre_upgrade() -> Result<#frame_support::sp_std::vec::Vec, #frame_support::sp_runtime::TryRuntimeError> { < Self as - #frame_support::traits::Hooks<::BlockNumber> + #frame_support::traits::Hooks<#frame_system::pallet_prelude::BlockNumberFor::> >::pre_upgrade() } #[cfg(feature = "try-runtime")] - fn post_upgrade(state: #frame_support::sp_std::vec::Vec) -> Result<(), &'static str> { + fn post_upgrade(state: #frame_support::sp_std::vec::Vec) -> Result<(), #frame_support::sp_runtime::TryRuntimeError> { #post_storage_version_check < Self as - #frame_support::traits::Hooks<::BlockNumber> + 
#frame_support::traits::Hooks<#frame_system::pallet_prelude::BlockNumberFor::> >::post_upgrade(state) } } impl<#type_impl_gen> - #frame_support::traits::OffchainWorker<::BlockNumber> + #frame_support::traits::OffchainWorker<#frame_system::pallet_prelude::BlockNumberFor::> for #pallet_ident<#type_use_gen> #where_clause { - fn offchain_worker(n: ::BlockNumber) { + fn offchain_worker(n: #frame_system::pallet_prelude::BlockNumberFor::) { < Self as #frame_support::traits::Hooks< - ::BlockNumber + #frame_system::pallet_prelude::BlockNumberFor:: > >::offchain_worker(n) } @@ -251,28 +251,30 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { for #pallet_ident<#type_use_gen> #where_clause { fn integrity_test() { - < - Self as #frame_support::traits::Hooks< - ::BlockNumber - > + #frame_support::sp_io::TestExternalities::default().execute_with(|| { + < + Self as #frame_support::traits::Hooks< + #frame_system::pallet_prelude::BlockNumberFor:: + > >::integrity_test() + }); } } } #[cfg(feature = "try-runtime")] impl<#type_impl_gen> - #frame_support::traits::TryState<::BlockNumber> + #frame_support::traits::TryState<#frame_system::pallet_prelude::BlockNumberFor::> for #pallet_ident<#type_use_gen> #where_clause { fn try_state( - n: ::BlockNumber, + n: #frame_system::pallet_prelude::BlockNumberFor::, _s: #frame_support::traits::TryStateSelect - ) -> Result<(), &'static str> { + ) -> Result<(), #frame_support::sp_runtime::TryRuntimeError> { #log_try_state < Self as #frame_support::traits::Hooks< - ::BlockNumber + #frame_system::pallet_prelude::BlockNumberFor:: > >::try_state(n) } diff --git a/frame/support/procedural/src/pallet/expand/mod.rs b/frame/support/procedural/src/pallet/expand/mod.rs index 926ab0ec82d73..2b998227c1d84 100644 --- a/frame/support/procedural/src/pallet/expand/mod.rs +++ b/frame/support/procedural/src/pallet/expand/mod.rs @@ -36,7 +36,6 @@ mod type_value; mod validate_unsigned; use crate::pallet::Def; -use 
frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; /// Merge where clause together, `where` token span is taken from the first not none one. @@ -75,16 +74,24 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { let tt_default_parts = tt_default_parts::expand_tt_default_parts(&mut def); let doc_only = doc_only::expand_doc_only(&mut def); - if get_doc_literals(&def.item.attrs).is_empty() { - def.item.attrs.push(syn::parse_quote!( - #[doc = r" - The module that hosts all the - [FRAME](https://docs.substrate.io/main-docs/build/events-errors/) - types needed to add this pallet to a - runtime. - "] - )); - } + def.item.attrs.insert( + 0, + syn::parse_quote!( + #[doc = r"The `pallet` module in each FRAME pallet hosts the most important items needed +to construct this pallet. + +The main components of this pallet are: +- [`Pallet`], which implements all of the dispatchable extrinsics of the pallet, among +other public functions. + - The subset of the functions that are dispatchable can be identified either in the + [`dispatchables`] module or in the [`Call`] enum. +- [`storage_types`], which contains the list of all types that are representing a +storage item. Otherwise, all storage items are listed among [*Type Definitions*](#types). +- [`Config`], which contains the configuration trait of this pallet. +- [`Event`] and [`Error`], which are listed among the [*Enums*](#enums). 
+ "] + ), + ); let new_items = quote::quote!( #metadata_docs diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index 99d2d79f231d9..800e23388c1af 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -62,8 +62,8 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&pallet_item.attrs).is_empty() { pallet_item.attrs.push(syn::parse_quote!( #[doc = r" - The [pallet](https://docs.substrate.io/reference/frame-pallets/#pallets) implementing - the on-chain logic. + The `Pallet` struct, the main type that implements traits and standalone + functions within the pallet. "] )); } diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs index c742ddcd25fbc..f3c394d731f5c 100644 --- a/frame/support/procedural/src/pallet/expand/storage.rs +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -22,6 +22,7 @@ use crate::{ Def, }, }; +use itertools::Itertools; use quote::ToTokens; use std::{collections::HashMap, ops::IndexMut}; use syn::spanned::Spanned; @@ -193,18 +194,7 @@ pub fn process_generics(def: &mut Def) -> syn::Result { - args.args.push(syn::GenericArgument::Type(hasher)); - args.args.push(syn::GenericArgument::Type(key)); - args.args.push(syn::GenericArgument::Type(value.clone())); - let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); - set_result_query_type_parameter(&mut query_kind)?; - args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); - args.args.push(syn::GenericArgument::Type(on_empty)); - let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); - args.args.push(syn::GenericArgument::Type(max_values)); - }, + StorageGenerics::Map { hasher, key, value, query_kind, 
on_empty, max_values } | StorageGenerics::CountedMap { hasher, key, @@ -247,7 +237,14 @@ pub fn process_generics(def: &mut Def) -> syn::Result { + StorageGenerics::NMap { keygen, value, query_kind, on_empty, max_values } | + StorageGenerics::CountedNMap { + keygen, + value, + query_kind, + on_empty, + max_values, + } => { args.args.push(syn::GenericArgument::Type(keygen)); args.args.push(syn::GenericArgument::Type(value.clone())); let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); @@ -264,7 +261,7 @@ pub fn process_generics(def: &mut Def) -> syn::Result (1, 2, 3), - Metadata::NMap { .. } => (2, 3, 4), + Metadata::NMap { .. } | Metadata::CountedNMap { .. } => (2, 3, 4), Metadata::Map { .. } | Metadata::CountedMap { .. } => (3, 4, 5), Metadata::DoubleMap { .. } => (5, 6, 7), }; @@ -310,6 +307,76 @@ pub fn process_generics(def: &mut Def) -> syn::Result t, + _ => unreachable!("Checked by def"), + }; + typ_item.attrs.push(syn::parse_quote!(#[doc = ""])); + typ_item.attrs.push(syn::parse_quote!(#[doc = #doc_line])); + }; + def.storages.iter_mut().for_each(|storage| match &storage.metadata { + Metadata::Value { value } => { + let doc_line = format!( + "Storage type is [`StorageValue`] with value type `{}`.", + value.to_token_stream() + ); + push_string_literal(&doc_line, storage); + }, + Metadata::Map { key, value } => { + let doc_line = format!( + "Storage type is [`StorageMap`] with key type `{}` and value type `{}`.", + key.to_token_stream(), + value.to_token_stream() + ); + push_string_literal(&doc_line, storage); + }, + Metadata::DoubleMap { key1, key2, value } => { + let doc_line = format!( + "Storage type is [`StorageDoubleMap`] with key1 type {}, key2 type {} and value type {}.", + key1.to_token_stream(), + key2.to_token_stream(), + value.to_token_stream() + ); + push_string_literal(&doc_line, storage); + }, + Metadata::NMap { keys, value, .. 
} => { + let doc_line = format!( + "Storage type is [`StorageNMap`] with keys type ({}) and value type {}.", + keys.iter() + .map(|k| k.to_token_stream().to_string()) + .collect::>() + .join(", "), + value.to_token_stream() + ); + push_string_literal(&doc_line, storage); + }, + Metadata::CountedNMap { keys, value, .. } => { + let doc_line = format!( + "Storage type is [`CountedStorageNMap`] with keys type ({}) and value type {}.", + keys.iter() + .map(|k| k.to_token_stream().to_string()) + .collect::>() + .join(", "), + value.to_token_stream() + ); + push_string_literal(&doc_line, storage); + }, + Metadata::CountedMap { key, value } => { + let doc_line = format!( + "Storage type is [`CountedStorageMap`] with key type {} and value type {}.", + key.to_token_stream(), + value.to_token_stream() + ); + push_string_literal(&doc_line, storage); + }, + }); +} + /// /// * generate StoragePrefix structs (e.g. for a storage `MyStorage` a struct with the name /// `_GeneratedPrefixForStorage$NameOfStorage` is generated) and implements StorageInstance trait. @@ -323,6 +390,8 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { Err(e) => return e.into_compile_error(), }; + augment_final_docs(def); + // Check for duplicate prefixes let mut prefix_set = HashMap::new(); let mut errors = def @@ -365,10 +434,6 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { if let Some(getter) = &storage.getter { let completed_where_clause = super::merge_where_clauses(&[&storage.where_clause, &def.config.where_clause]); - let docs = storage - .docs - .iter() - .map(|d| quote::quote_spanned!(storage.attr_span => #[doc = #d])); let ident = &storage.ident; let gen = &def.type_use_generics(storage.attr_span); @@ -378,6 +443,13 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { let cfg_attrs = &storage.cfg_attrs; + // If the storage item is public, just link to it rather than copy-pasting the docs. 
+ let getter_doc_line = if matches!(storage.vis, syn::Visibility::Public(_)) { + format!("An auto-generated getter for [`{}`].", storage.ident) + } else { + storage.docs.iter().map(|d| d.into_token_stream().to_string()).join("\n") + }; + match &storage.metadata { Metadata::Value { value } => { let query = match storage.query_kind.as_ref().expect("Checked by def") { @@ -394,7 +466,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { quote::quote_spanned!(storage.attr_span => #(#cfg_attrs)* impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { - #( #docs )* + #[doc = #getter_doc_line] pub fn #getter() -> #query { < #full_ident as #frame_support::storage::StorageValue<#value> @@ -418,7 +490,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { quote::quote_spanned!(storage.attr_span => #(#cfg_attrs)* impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { - #( #docs )* + #[doc = #getter_doc_line] pub fn #getter(k: KArg) -> #query where KArg: #frame_support::codec::EncodeLike<#key>, { @@ -444,7 +516,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { quote::quote_spanned!(storage.attr_span => #(#cfg_attrs)* impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { - #( #docs )* + #[doc = #getter_doc_line] pub fn #getter(k: KArg) -> #query where KArg: #frame_support::codec::EncodeLike<#key>, { @@ -470,7 +542,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { quote::quote_spanned!(storage.attr_span => #(#cfg_attrs)* impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { - #( #docs )* + #[doc = #getter_doc_line] pub fn #getter(k1: KArg1, k2: KArg2) -> #query where KArg1: #frame_support::codec::EncodeLike<#key1>, KArg2: #frame_support::codec::EncodeLike<#key2>, @@ -498,7 +570,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { quote::quote_spanned!(storage.attr_span => #(#cfg_attrs)* 
impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { - #( #docs )* + #[doc = #getter_doc_line] pub fn #getter(key: KArg) -> #query where KArg: #frame_support::storage::types::EncodeLikeTuple< @@ -514,6 +586,36 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { } ) }, + Metadata::CountedNMap { keygen, value, .. } => { + let query = match storage.query_kind.as_ref().expect("Checked by def") { + QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => + Option<#value> + ), + QueryKind::ResultQuery(error_path, _) => { + quote::quote_spanned!(storage.attr_span => + Result<#value, #error_path> + ) + }, + QueryKind::ValueQuery => quote::quote!(#value), + }; + quote::quote_spanned!(storage.attr_span => + #(#cfg_attrs)* + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { + #[doc = #getter_doc_line] + pub fn #getter(key: KArg) -> #query + where + KArg: #frame_support::storage::types::EncodeLikeTuple< + <#keygen as #frame_support::storage::types::KeyGenerator>::KArg + > + + #frame_support::storage::types::TupleToEncodedIter, + { + // NOTE: we can't use any trait here because CountedStorageNMap + // doesn't implement any. + <#full_ident>::get(key) + } + } + ) + }, } } else { Default::default() @@ -530,40 +632,72 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { let cfg_attrs = &storage_def.cfg_attrs; - let maybe_counter = if let Metadata::CountedMap { .. 
} = storage_def.metadata { - let counter_prefix_struct_ident = counter_prefix_ident(&storage_def.ident); - let counter_prefix_struct_const = counter_prefix(&prefix_struct_const); - - quote::quote_spanned!(storage_def.attr_span => - #(#cfg_attrs)* - #[doc(hidden)] - #prefix_struct_vis struct #counter_prefix_struct_ident<#type_use_gen>( - core::marker::PhantomData<(#type_use_gen,)> - ); - #(#cfg_attrs)* - impl<#type_impl_gen> #frame_support::traits::StorageInstance - for #counter_prefix_struct_ident<#type_use_gen> - #config_where_clause - { - fn pallet_prefix() -> &'static str { - < - ::PalletInfo - as #frame_support::traits::PalletInfo - >::name::>() - .expect("No name found for the pallet in the runtime! This usually means that the pallet wasn't added to `construct_runtime!`.") + let maybe_counter = match storage_def.metadata { + Metadata::CountedMap { .. } => { + let counter_prefix_struct_ident = counter_prefix_ident(&storage_def.ident); + let counter_prefix_struct_const = counter_prefix(&prefix_struct_const); + quote::quote_spanned!(storage_def.attr_span => + #(#cfg_attrs)* + #[doc(hidden)] + #prefix_struct_vis struct #counter_prefix_struct_ident<#type_use_gen>( + core::marker::PhantomData<(#type_use_gen,)> + ); + #(#cfg_attrs)* + impl<#type_impl_gen> #frame_support::traits::StorageInstance + for #counter_prefix_struct_ident<#type_use_gen> + #config_where_clause + { + fn pallet_prefix() -> &'static str { + < + ::PalletInfo + as #frame_support::traits::PalletInfo + >::name::>() + .expect("No name found for the pallet in the runtime! 
This usually means that the pallet wasn't added to `construct_runtime!`.") + } + const STORAGE_PREFIX: &'static str = #counter_prefix_struct_const; } - const STORAGE_PREFIX: &'static str = #counter_prefix_struct_const; - } - #(#cfg_attrs)* - impl<#type_impl_gen> #frame_support::storage::types::CountedStorageMapInstance - for #prefix_struct_ident<#type_use_gen> - #config_where_clause - { - type CounterPrefix = #counter_prefix_struct_ident<#type_use_gen>; - } - ) - } else { - proc_macro2::TokenStream::default() + #(#cfg_attrs)* + impl<#type_impl_gen> #frame_support::storage::types::CountedStorageMapInstance + for #prefix_struct_ident<#type_use_gen> + #config_where_clause + { + type CounterPrefix = #counter_prefix_struct_ident<#type_use_gen>; + } + ) + }, + Metadata::CountedNMap { .. } => { + let counter_prefix_struct_ident = counter_prefix_ident(&storage_def.ident); + let counter_prefix_struct_const = counter_prefix(&prefix_struct_const); + quote::quote_spanned!(storage_def.attr_span => + #(#cfg_attrs)* + #[doc(hidden)] + #prefix_struct_vis struct #counter_prefix_struct_ident<#type_use_gen>( + core::marker::PhantomData<(#type_use_gen,)> + ); + #(#cfg_attrs)* + impl<#type_impl_gen> #frame_support::traits::StorageInstance + for #counter_prefix_struct_ident<#type_use_gen> + #config_where_clause + { + fn pallet_prefix() -> &'static str { + < + ::PalletInfo + as #frame_support::traits::PalletInfo + >::name::>() + .expect("No name found for the pallet in the runtime! 
This usually means that the pallet wasn't added to `construct_runtime!`.") + } + const STORAGE_PREFIX: &'static str = #counter_prefix_struct_const; + } + #(#cfg_attrs)* + impl<#type_impl_gen> #frame_support::storage::types::CountedStorageNMapInstance + for #prefix_struct_ident<#type_use_gen> + #config_where_clause + { + type CounterPrefix = #counter_prefix_struct_ident<#type_use_gen>; + } + ) + }, + _ => proc_macro2::TokenStream::default(), }; quote::quote_spanned!(storage_def.attr_span => diff --git a/frame/support/procedural/src/pallet/expand/store_trait.rs b/frame/support/procedural/src/pallet/expand/store_trait.rs index 251c88f08e7a0..6635adc988157 100644 --- a/frame/support/procedural/src/pallet/expand/store_trait.rs +++ b/frame/support/procedural/src/pallet/expand/store_trait.rs @@ -39,7 +39,7 @@ pub fn expand_store_trait(def: &mut Def) -> proc_macro2::TokenStream { let warnig_struct_name = syn::Ident::new("Store", *attribute_span); let warning: syn::ItemStruct = syn::parse_quote!( #[deprecated(note = r" - Use of `#[pallet::generate_store(pub(super) trait Store)]` will be removed soon. + Use of `#[pallet::generate_store(pub(super) trait Store)]` will be removed after July 2023. 
Check https://github.com/paritytech/substrate/pull/13535 for more details.")] struct #warnig_struct_name; ); diff --git a/frame/support/procedural/src/pallet/expand/tt_default_parts.rs b/frame/support/procedural/src/pallet/expand/tt_default_parts.rs index f36c765f7beb6..356bdbf67e923 100644 --- a/frame/support/procedural/src/pallet/expand/tt_default_parts.rs +++ b/frame/support/procedural/src/pallet/expand/tt_default_parts.rs @@ -26,6 +26,8 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { let count = COUNTER.with(|counter| counter.borrow_mut().inc()); let default_parts_unique_id = syn::Ident::new(&format!("__tt_default_parts_{}", count), def.item.span()); + let extra_parts_unique_id = + syn::Ident::new(&format!("__tt_extra_parts_{}", count), def.item.span()); let call_part = def.call.as_ref().map(|_| quote::quote!(Call,)); @@ -36,6 +38,8 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { quote::quote!( Event #gen , ) }); + let error_part = def.error.as_ref().map(|_| quote::quote!(Error,)); + let origin_part = def.origin.as_ref().map(|origin| { let gen = origin.is_generic.then(|| quote::quote!( )); quote::quote!( Origin #gen , ) @@ -95,8 +99,8 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { $($frame_support)*::tt_return! { $caller tokens = [{ - ::{ - Pallet, #call_part #storage_part #event_part #origin_part #config_part + expanded::{ + Pallet, #call_part #storage_part #event_part #error_part #origin_part #config_part #inherent_part #validate_unsigned_part #freeze_reason_part #hold_reason_part #lock_id_part #slash_reason_part } @@ -106,5 +110,33 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { } pub use #default_parts_unique_id as tt_default_parts; + + + // This macro is similar to the `tt_default_parts!`. It expands the pallets thare are declared + // explicitly (`System: frame_system::{Pallet, Call}`) with extra parts. 
+ // + // For example, after expansion an explicit pallet would look like: + // `System: expanded::{Error} ::{Pallet, Call}`. + // + // The `expanded` keyword is a marker of the final state of the `construct_runtime!`. + #[macro_export] + #[doc(hidden)] + macro_rules! #extra_parts_unique_id { + { + $caller:tt + frame_support = [{ $($frame_support:ident)::* }] + } => { + $($frame_support)*::tt_return! { + $caller + tokens = [{ + expanded::{ + #error_part + } + }] + } + }; + } + + pub use #extra_parts_unique_id as tt_extra_parts; ) } diff --git a/frame/support/procedural/src/pallet/parse/config.rs b/frame/support/procedural/src/pallet/parse/config.rs index 7b52c1664ba45..c60bf1f3cbea7 100644 --- a/frame/support/procedural/src/pallet/parse/config.rs +++ b/frame/support/procedural/src/pallet/parse/config.rs @@ -18,7 +18,7 @@ use super::helper; use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; -use syn::spanned::Spanned; +use syn::{spanned::Spanned, token, Token}; /// List of additional token to be used for parsing. mod keyword { @@ -27,12 +27,20 @@ mod keyword { syn::custom_keyword!(T); syn::custom_keyword!(I); syn::custom_keyword!(config); + syn::custom_keyword!(pallet); syn::custom_keyword!(IsType); syn::custom_keyword!(RuntimeEvent); syn::custom_keyword!(Event); - syn::custom_keyword!(constant); syn::custom_keyword!(frame_system); syn::custom_keyword!(disable_frame_system_supertrait_check); + syn::custom_keyword!(no_default); + syn::custom_keyword!(constant); +} + +#[derive(Default)] +pub struct DefaultTrait { + pub items: Vec, + pub has_system: bool, } /// Input definition for the pallet config. @@ -52,6 +60,12 @@ pub struct ConfigDef { pub where_clause: Option, /// The span of the pallet::config attribute. pub attr_span: proc_macro2::Span, + /// Whether a default sub-trait should be generated. + /// + /// Contains default sub-trait items (instantiated by `#[pallet::config(with_default)]`). 
+ /// Vec will be empty if `#[pallet::config(with_default)]` is not specified or if there are + /// no trait items. + pub default_sub_trait: Option, } /// Input definition for a constant in pallet config. @@ -123,40 +137,28 @@ impl syn::parse::Parse for DisableFrameSystemSupertraitCheck { } } -/// Parse for `#[pallet::constant]` -pub struct TypeAttrConst { - pound_token: syn::Token![#], - bracket_token: syn::token::Bracket, - pallet_ident: syn::Ident, - path_sep_token: syn::token::PathSep, - constant_keyword: keyword::constant, -} - -impl syn::parse::Parse for TypeAttrConst { - fn parse(input: syn::parse::ParseStream) -> syn::Result { - let pound_token = input.parse::()?; - let content; - let bracket_token = syn::bracketed!(content in input); - let pallet_ident = content.parse::()?; - let path_sep_token = content.parse::()?; - let constant_keyword = content.parse::()?; - - Ok(Self { pound_token, bracket_token, pallet_ident, path_sep_token, constant_keyword }) - } +/// Parsing for the `typ` portion of `PalletAttr` +#[derive(derive_syn_parse::Parse, PartialEq, Eq)] +pub enum PalletAttrType { + #[peek(keyword::no_default, name = "no_default")] + NoDefault(keyword::no_default), + #[peek(keyword::constant, name = "constant")] + Constant(keyword::constant), } -impl ToTokens for TypeAttrConst { - fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { - self.pound_token.to_tokens(tokens); - self.bracket_token.surround(tokens, |tokens| { - self.pallet_ident.to_tokens(tokens); - self.path_sep_token.to_tokens(tokens); - self.constant_keyword.to_tokens(tokens); - }) - } +/// Parsing for `#[pallet::X]` +#[derive(derive_syn_parse::Parse)] +pub struct PalletAttr { + _pound: Token![#], + #[bracket] + _bracket: token::Bracket, + #[inside(_bracket)] + _pallet: keyword::pallet, + #[prefix(Token![::] in _bracket)] + #[inside(_bracket)] + typ: PalletAttrType, } -/// Parse for `$ident::Config` pub struct ConfigBoundParse(syn::Ident); impl syn::parse::Parse for ConfigBoundParse { 
@@ -307,6 +309,7 @@ impl ConfigDef { attr_span: proc_macro2::Span, index: usize, item: &mut syn::Item, + enable_default: bool, ) -> syn::Result { let item = if let syn::Item::Trait(item) = item { item @@ -342,47 +345,80 @@ impl ConfigDef { false }; + let has_frame_system_supertrait = item.supertraits.iter().any(|s| { + syn::parse2::(s.to_token_stream()) + .map_or(false, |b| b.0 == *frame_system) + }); + let mut has_event_type = false; let mut consts_metadata = vec![]; + let mut default_sub_trait = if enable_default { + Some(DefaultTrait { + items: Default::default(), + has_system: has_frame_system_supertrait, + }) + } else { + None + }; for trait_item in &mut item.items { - // Parse for event - has_event_type = - has_event_type || check_event_type(frame_system, trait_item, has_instance)?; + let is_event = check_event_type(frame_system, trait_item, has_instance)?; + has_event_type = has_event_type || is_event; - // Parse for constant - let type_attrs_const: Vec = helper::take_item_pallet_attrs(trait_item)?; + let mut already_no_default = false; + let mut already_constant = false; - if type_attrs_const.len() > 1 { - let msg = "Invalid attribute in pallet::config, only one attribute is expected"; - return Err(syn::Error::new(type_attrs_const[1].span(), msg)) - } - - if type_attrs_const.len() == 1 { - match trait_item { - syn::TraitItem::Type(ref type_) => { - let constant = ConstMetadataDef::try_from(type_)?; - consts_metadata.push(constant); + while let Ok(Some(pallet_attr)) = + helper::take_first_item_pallet_attr::(trait_item) + { + match (pallet_attr.typ, &trait_item) { + (PalletAttrType::Constant(_), syn::TraitItem::Type(ref typ)) => { + if already_constant { + return Err(syn::Error::new( + pallet_attr._bracket.span.join(), + "Duplicate #[pallet::constant] attribute not allowed.", + )) + } + already_constant = true; + consts_metadata.push(ConstMetadataDef::try_from(typ)?); }, - _ => { - let msg = - "Invalid pallet::constant in pallet::config, expected type trait \ 
- item"; - return Err(syn::Error::new(trait_item.span(), msg)) + (PalletAttrType::Constant(_), _) => + return Err(syn::Error::new( + trait_item.span(), + "Invalid #[pallet::constant] in #[pallet::config], expected type item", + )), + (PalletAttrType::NoDefault(_), _) => { + if !enable_default { + return Err(syn::Error::new( + pallet_attr._bracket.span.join(), + "`#[pallet:no_default]` can only be used if `#[pallet::config(with_default)]` \ + has been specified" + )) + } + if already_no_default { + return Err(syn::Error::new( + pallet_attr._bracket.span.join(), + "Duplicate #[pallet::no_default] attribute not allowed.", + )) + } + + already_no_default = true; }, } } + + if !already_no_default && !is_event && enable_default { + default_sub_trait + .as_mut() + .expect("is 'Some(_)' if 'enable_default'; qed") + .items + .push(trait_item.clone()); + } } let attr: Option = helper::take_first_item_pallet_attr(&mut item.attrs)?; - let disable_system_supertrait_check = attr.is_some(); - let has_frame_system_supertrait = item.supertraits.iter().any(|s| { - syn::parse2::(s.to_token_stream()) - .map_or(false, |b| b.0 == *frame_system) - }); - if !has_frame_system_supertrait && !disable_system_supertrait_check { let found = if item.supertraits.is_empty() { "none".to_string() @@ -407,6 +443,14 @@ impl ConfigDef { return Err(syn::Error::new(item.span(), msg)) } - Ok(Self { index, has_instance, consts_metadata, has_event_type, where_clause, attr_span }) + Ok(Self { + index, + has_instance, + consts_metadata, + has_event_type, + where_clause, + attr_span, + default_sub_trait, + }) } } diff --git a/frame/support/procedural/src/pallet/parse/genesis_build.rs b/frame/support/procedural/src/pallet/parse/genesis_build.rs index 6d356b2ee3844..d0e1d9ec998ec 100644 --- a/frame/support/procedural/src/pallet/parse/genesis_build.rs +++ b/frame/support/procedural/src/pallet/parse/genesis_build.rs @@ -23,7 +23,7 @@ pub struct GenesisBuildDef { /// The index of item in pallet module. 
pub index: usize, /// A set of usage of instance, must be check for consistency with trait. - pub instances: Vec, + pub instances: Option>, /// The where_clause used. pub where_clause: Option, /// The span of the pallet::genesis_build attribute. @@ -53,7 +53,8 @@ impl GenesisBuildDef { })? .1; - let instances = vec![helper::check_genesis_builder_usage(item_trait)?]; + let instances = + helper::check_genesis_builder_usage(item_trait)?.map(|instances| vec![instances]); Ok(Self { attr_span, index, instances, where_clause: item.generics.where_clause.clone() }) } diff --git a/frame/support/procedural/src/pallet/parse/helper.rs b/frame/support/procedural/src/pallet/parse/helper.rs index 3cdbfb1f591d9..1e6e83d7eeba7 100644 --- a/frame/support/procedural/src/pallet/parse/helper.rs +++ b/frame/support/procedural/src/pallet/parse/helper.rs @@ -23,6 +23,7 @@ mod keyword { syn::custom_keyword!(I); syn::custom_keyword!(compact); syn::custom_keyword!(GenesisBuild); + syn::custom_keyword!(BuildGenesisConfig); syn::custom_keyword!(Config); syn::custom_keyword!(T); syn::custom_keyword!(Pallet); @@ -47,7 +48,9 @@ pub trait MutItemAttrs { } /// Take the first pallet attribute (e.g. attribute like `#[pallet..]`) and decode it to `Attr` -pub fn take_first_item_pallet_attr(item: &mut impl MutItemAttrs) -> syn::Result> +pub(crate) fn take_first_item_pallet_attr( + item: &mut impl MutItemAttrs, +) -> syn::Result> where Attr: syn::parse::Parse, { @@ -64,7 +67,7 @@ where } /// Take all the pallet attributes (e.g. attribute like `#[pallet..]`) and decode them to `Attr` -pub fn take_item_pallet_attrs(item: &mut impl MutItemAttrs) -> syn::Result> +pub(crate) fn take_item_pallet_attrs(item: &mut impl MutItemAttrs) -> syn::Result> where Attr: syn::parse::Parse, { @@ -486,26 +489,32 @@ pub fn check_type_def_gen( /// Check the syntax: /// * either `GenesisBuild` /// * or `GenesisBuild` +/// * or `BuildGenesisConfig` /// -/// return the instance if found. 
-pub fn check_genesis_builder_usage(type_: &syn::Path) -> syn::Result { +/// return the instance if found for `GenesisBuild` +/// return None for BuildGenesisConfig +pub fn check_genesis_builder_usage(type_: &syn::Path) -> syn::Result> { let expected = "expected `GenesisBuild` or `GenesisBuild`"; - pub struct Checker(InstanceUsage); + pub struct Checker(Option); impl syn::parse::Parse for Checker { fn parse(input: syn::parse::ParseStream) -> syn::Result { let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; - input.parse::()?; - input.parse::()?; - input.parse::()?; - if input.peek(syn::Token![,]) { - instance_usage.has_instance = true; - input.parse::()?; - input.parse::()?; + if input.peek(keyword::GenesisBuild) { + input.parse::()?; + input.parse::()?; + input.parse::()?; + if input.peek(syn::Token![,]) { + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + } + input.parse::]>()?; + return Ok(Self(Some(instance_usage))) + } else { + input.parse::()?; + return Ok(Self(None)) } - input.parse::]>()?; - - Ok(Self(instance_usage)) } } diff --git a/frame/support/procedural/src/pallet/parse/mod.rs b/frame/support/procedural/src/pallet/parse/mod.rs index 770cba68c1aad..0f5e5f1136610 100644 --- a/frame/support/procedural/src/pallet/parse/mod.rs +++ b/frame/support/procedural/src/pallet/parse/mod.rs @@ -100,8 +100,14 @@ impl Def { let pallet_attr: Option = helper::take_first_item_pallet_attr(item)?; match pallet_attr { - Some(PalletAttr::Config(span)) if config.is_none() => - config = Some(config::ConfigDef::try_from(&frame_system, span, index, item)?), + Some(PalletAttr::Config(span, with_default)) if config.is_none() => + config = Some(config::ConfigDef::try_from( + &frame_system, + span, + index, + item, + with_default, + )?), Some(PalletAttr::Pallet(span)) if pallet_struct.is_none() => { let p = pallet_struct::PalletStructDef::try_from(span, index, item)?; pallet_struct = Some(p); @@ -262,7 +268,7 @@ impl Def { 
instances.extend_from_slice(&genesis_config.instances[..]); } if let Some(genesis_build) = &self.genesis_build { - instances.extend_from_slice(&genesis_build.instances[..]); + genesis_build.instances.as_ref().map(|i| instances.extend_from_slice(&i)); } if let Some(extra_constants) = &self.extra_constants { instances.extend_from_slice(&extra_constants.instances[..]); @@ -405,6 +411,7 @@ mod keyword { syn::custom_keyword!(weight); syn::custom_keyword!(event); syn::custom_keyword!(config); + syn::custom_keyword!(with_default); syn::custom_keyword!(hooks); syn::custom_keyword!(inherent); syn::custom_keyword!(error); @@ -423,7 +430,7 @@ mod keyword { /// Parse attributes for item in pallet module /// syntax must be `pallet::` (e.g. `#[pallet::config]`) enum PalletAttr { - Config(proc_macro2::Span), + Config(proc_macro2::Span, bool), Pallet(proc_macro2::Span), Hooks(proc_macro2::Span), /// A `#[pallet::call]` with optional attributes to specialize the behaviour. @@ -480,7 +487,7 @@ enum PalletAttr { impl PalletAttr { fn span(&self) -> proc_macro2::Span { match self { - Self::Config(span) => *span, + Self::Config(span, _) => *span, Self::Pallet(span) => *span, Self::Hooks(span) => *span, Self::RuntimeCall(_, span) => *span, @@ -509,7 +516,14 @@ impl syn::parse::Parse for PalletAttr { let lookahead = content.lookahead1(); if lookahead.peek(keyword::config) { - Ok(PalletAttr::Config(content.parse::()?.span())) + let span = content.parse::()?.span(); + let with_default = content.peek(syn::token::Paren); + if with_default { + let inside_config; + let _paren = syn::parenthesized!(inside_config in content); + inside_config.parse::()?; + } + Ok(PalletAttr::Config(span, with_default)) } else if lookahead.peek(keyword::pallet) { Ok(PalletAttr::Pallet(content.parse::()?.span())) } else if lookahead.peek(keyword::hooks) { diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index 12e06b214b6b6..3a0ec4747153a 100644 
--- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -138,6 +138,7 @@ pub enum Metadata { CountedMap { value: syn::Type, key: syn::Type }, DoubleMap { value: syn::Type, key1: syn::Type, key2: syn::Type }, NMap { keys: Vec, keygen: syn::Type, value: syn::Type }, + CountedNMap { keys: Vec, keygen: syn::Type, value: syn::Type }, } pub enum QueryKind { @@ -230,6 +231,13 @@ pub enum StorageGenerics { on_empty: Option, max_values: Option, }, + CountedNMap { + keygen: syn::Type, + value: syn::Type, + query_kind: Option, + on_empty: Option, + max_values: Option, + }, } impl StorageGenerics { @@ -242,6 +250,8 @@ impl StorageGenerics { Self::Value { value, .. } => Metadata::Value { value }, Self::NMap { keygen, value, .. } => Metadata::NMap { keys: collect_keys(&keygen)?, keygen, value }, + Self::CountedNMap { keygen, value, .. } => + Metadata::CountedNMap { keys: collect_keys(&keygen)?, keygen, value }, }; Ok(res) @@ -254,7 +264,8 @@ impl StorageGenerics { Self::Map { query_kind, .. } | Self::CountedMap { query_kind, .. } | Self::Value { query_kind, .. } | - Self::NMap { query_kind, .. } => query_kind.clone(), + Self::NMap { query_kind, .. } | + Self::CountedNMap { query_kind, .. 
} => query_kind.clone(), } } } @@ -265,6 +276,7 @@ enum StorageKind { CountedMap, DoubleMap, NMap, + CountedNMap, } /// Check the generics in the `map` contains the generics in `gen` may contains generics in @@ -493,6 +505,29 @@ fn process_named_generics( max_values: parsed.remove("MaxValues").map(|binding| binding.ty), } }, + StorageKind::CountedNMap => { + check_generics( + &parsed, + &["Key", "Value"], + &["QueryKind", "OnEmpty", "MaxValues"], + "CountedStorageNMap", + args_span, + )?; + + StorageGenerics::CountedNMap { + keygen: parsed + .remove("Key") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + value: parsed + .remove("Value") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + query_kind: parsed.remove("QueryKind").map(|binding| binding.ty), + on_empty: parsed.remove("OnEmpty").map(|binding| binding.ty), + max_values: parsed.remove("MaxValues").map(|binding| binding.ty), + } + }, }; let metadata = generics.metadata()?; @@ -578,6 +613,16 @@ fn process_unnamed_generics( false, ) }, + StorageKind::CountedNMap => { + let keygen = retrieve_arg(1)?; + let keys = collect_keys(&keygen)?; + ( + None, + Metadata::CountedNMap { keys, keygen, value: retrieve_arg(2)? 
}, + retrieve_arg(3).ok(), + false, + ) + }, }; Ok(res) @@ -594,10 +639,11 @@ fn process_generics( "CountedStorageMap" => StorageKind::CountedMap, "StorageDoubleMap" => StorageKind::DoubleMap, "StorageNMap" => StorageKind::NMap, + "CountedStorageNMap" => StorageKind::CountedNMap, found => { let msg = format!( "Invalid pallet::storage, expected ident: `StorageValue` or \ - `StorageMap` or `CountedStorageMap` or `StorageDoubleMap` or `StorageNMap` \ + `StorageMap` or `CountedStorageMap` or `StorageDoubleMap` or `StorageNMap` or `CountedStorageNMap` \ in order to expand metadata, found `{}`.", found, ); diff --git a/frame/support/procedural/src/storage/genesis_config/builder_def.rs b/frame/support/procedural/src/storage/genesis_config/builder_def.rs deleted file mode 100644 index 5b8e4eb7603fc..0000000000000 --- a/frame/support/procedural/src/storage/genesis_config/builder_def.rs +++ /dev/null @@ -1,151 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Builder logic definition used to build genesis storage. - -use super::super::{DeclStorageDefExt, StorageLineTypeDef}; -use frame_support_procedural_tools::syn_ext as ext; -use proc_macro2::TokenStream; -use quote::{quote, quote_spanned}; -use syn::spanned::Spanned; - -/// Definition of builder blocks, each block insert some value in the storage. 
-/// They must be called inside externalities, and with `self` being the genesis config. -pub struct BuilderDef { - /// Contains: - /// * build block for storage with build attribute. - /// * build block for storage with config attribute and no build attribute. - /// * build block for extra genesis build expression. - pub blocks: Vec, - /// The build blocks requires generic traits. - pub is_generic: bool, -} - -impl BuilderDef { - pub fn from_def(scrate: &TokenStream, def: &DeclStorageDefExt) -> Self { - let mut blocks = Vec::new(); - let mut is_generic = false; - - for line in def.storage_lines.iter() { - let storage_struct = &line.storage_struct; - let storage_trait = &line.storage_trait; - let value_type = &line.value_type; - - // Defines the data variable to use for insert at genesis either from build or config. - let mut data = None; - - if let Some(builder) = &line.build { - is_generic |= ext::expr_contains_ident(builder, &def.module_runtime_generic); - is_generic |= line.is_generic; - - data = Some(match &line.storage_type { - StorageLineTypeDef::Simple(_) if line.is_option => { - quote_spanned!(builder.span() => - // NOTE: the type of `data` is specified when used later in the code - let builder: fn(&Self) -> _ = #builder; - let data = builder(self); - let data = Option::as_ref(&data); - ) - }, - _ => quote_spanned!(builder.span() => - // NOTE: the type of `data` is specified when used later in the code - let builder: fn(&Self) -> _ = #builder; - let data = &builder(self); - ), - }); - } else if let Some(config) = &line.config { - is_generic |= line.is_generic; - - data = Some(match &line.storage_type { - StorageLineTypeDef::Simple(_) if line.is_option => { - quote!( let data = Some(&self.#config); ) - }, - _ => quote!( let data = &self.#config; ), - }); - }; - - if let Some(data) = data { - blocks.push(match &line.storage_type { - StorageLineTypeDef::Simple(_) if line.is_option => { - quote! 
{{ - #data - let v: Option<&#value_type>= data; - if let Some(v) = v { - <#storage_struct as #scrate::#storage_trait>::put::<&#value_type>(v); - } - }} - }, - StorageLineTypeDef::Simple(_) if !line.is_option => { - quote! {{ - #data - let v: &#value_type = data; - <#storage_struct as #scrate::#storage_trait>::put::<&#value_type>(v); - }} - }, - StorageLineTypeDef::Simple(_) => unreachable!(), - StorageLineTypeDef::Map(map) => { - let key = &map.key; - quote! {{ - #data - let data: &#scrate::sp_std::vec::Vec<(#key, #value_type)> = data; - data.iter().for_each(|(k, v)| { - <#storage_struct as #scrate::#storage_trait>::insert::< - &#key, &#value_type - >(k, v); - }); - }} - }, - StorageLineTypeDef::DoubleMap(map) => { - let key1 = &map.key1; - let key2 = &map.key2; - quote! {{ - #data - let data: &#scrate::sp_std::vec::Vec<(#key1, #key2, #value_type)> = data; - data.iter().for_each(|(k1, k2, v)| { - <#storage_struct as #scrate::#storage_trait>::insert::< - &#key1, &#key2, &#value_type - >(k1, k2, v); - }); - }} - }, - StorageLineTypeDef::NMap(map) => { - let key_tuple = map.to_key_tuple(); - let key_arg = if map.keys.len() == 1 { quote!((k,)) } else { quote!(k) }; - quote! {{ - #data - let data: &#scrate::sp_std::vec::Vec<(#key_tuple, #value_type)> = data; - data.iter().for_each(|(k, v)| { - <#storage_struct as #scrate::#storage_trait>::insert(#key_arg, v); - }); - }} - }, - }); - } - } - - if let Some(builder) = def.extra_genesis_build.as_ref() { - is_generic |= ext::expr_contains_ident(builder, &def.module_runtime_generic); - - blocks.push(quote_spanned! 
{ builder.span() => - let extra_genesis_builder: fn(&Self) = #builder; - extra_genesis_builder(self); - }); - } - - Self { blocks, is_generic } - } -} diff --git a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs deleted file mode 100644 index 31e9996ee51f4..0000000000000 --- a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs +++ /dev/null @@ -1,162 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Genesis config definition. - -use super::super::{DeclStorageDefExt, StorageLineTypeDef}; -use frame_support_procedural_tools::syn_ext as ext; -use proc_macro2::TokenStream; -use quote::quote; -use syn::{parse_quote, spanned::Spanned}; - -pub struct GenesisConfigFieldDef { - pub name: syn::Ident, - pub typ: syn::Type, - pub attrs: Vec, - pub default: TokenStream, -} - -pub struct GenesisConfigDef { - pub is_generic: bool, - pub fields: Vec, - /// For example: `, I: Instance=DefaultInstance>`. - pub genesis_struct_decl: TokenStream, - /// For example: ``. - pub genesis_struct: TokenStream, - /// For example: `, I: Instance>`. - pub genesis_impl: TokenStream, - /// The where clause to use to constrain generics if genesis config is generic. 
- pub genesis_where_clause: Option, -} - -impl GenesisConfigDef { - pub fn from_def(def: &DeclStorageDefExt) -> syn::Result { - let fields = Self::get_genesis_config_field_defs(def)?; - - let is_generic = fields - .iter() - .any(|field| ext::type_contains_ident(&field.typ, &def.module_runtime_generic)); - - let (genesis_struct_decl, genesis_impl, genesis_struct, genesis_where_clause) = - if is_generic { - let runtime_generic = &def.module_runtime_generic; - let runtime_trait = &def.module_runtime_trait; - let optional_instance = &def.optional_instance; - let optional_instance_bound = &def.optional_instance_bound; - let optional_instance_bound_optional_default = - &def.optional_instance_bound_optional_default; - let where_clause = &def.where_clause; - ( - quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound_optional_default>), - quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound>), - quote!(<#runtime_generic, #optional_instance>), - where_clause.clone(), - ) - } else { - (quote!(), quote!(), quote!(), None) - }; - - Ok(Self { - is_generic, - fields, - genesis_struct_decl, - genesis_struct, - genesis_impl, - genesis_where_clause, - }) - } - - fn get_genesis_config_field_defs( - def: &DeclStorageDefExt, - ) -> syn::Result> { - let mut config_field_defs = Vec::new(); - - for (config_field, line) in def.storage_lines.iter().filter_map(|line| { - line.config.as_ref().map(|config_field| (config_field.clone(), line)) - }) { - let value_type = &line.value_type; - - let typ = match &line.storage_type { - StorageLineTypeDef::Simple(_) => (*value_type).clone(), - StorageLineTypeDef::Map(map) => { - let key = &map.key; - parse_quote!( Vec<(#key, #value_type)> ) - }, - StorageLineTypeDef::DoubleMap(map) => { - let key1 = &map.key1; - let key2 = &map.key2; - - parse_quote!( Vec<(#key1, #key2, #value_type)> ) - }, - StorageLineTypeDef::NMap(map) => { - let key_tuple = map.to_key_tuple(); - parse_quote!( Vec<(#key_tuple, #value_type)> ) - }, - }; - - 
let default = - line.default_value - .as_ref() - .map(|d| { - if line.is_option { - quote!( #d.unwrap_or_default() ) - } else { - quote!( #d ) - } - }) - .unwrap_or_else(|| quote!(Default::default())); - - config_field_defs.push(GenesisConfigFieldDef { - name: config_field, - typ, - attrs: line.doc_attrs.clone(), - default, - }); - } - - for line in &def.extra_genesis_config_lines { - let attrs = line - .attrs - .iter() - .map(|attr| { - if attr.meta.path().is_ident("cfg") { - return Err(syn::Error::new( - attr.meta.span(), - "extra genesis config items do not support `cfg` attribute", - )) - } - Ok(attr.meta.clone()) - }) - .collect::>()?; - - let default = line - .default - .as_ref() - .map(|e| quote!( #e )) - .unwrap_or_else(|| quote!(Default::default())); - - config_field_defs.push(GenesisConfigFieldDef { - name: line.name.clone(), - typ: line.typ.clone(), - attrs, - default, - }); - } - - Ok(config_field_defs) - } -} diff --git a/frame/support/procedural/src/storage/genesis_config/mod.rs b/frame/support/procedural/src/storage/genesis_config/mod.rs deleted file mode 100644 index 57c938368bc90..0000000000000 --- a/frame/support/procedural/src/storage/genesis_config/mod.rs +++ /dev/null @@ -1,196 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Declaration of genesis config structure and implementation of build storage trait and -//! functions. - -use super::DeclStorageDefExt; -pub use builder_def::BuilderDef; -pub use genesis_config_def::GenesisConfigDef; -use proc_macro2::{Span, TokenStream}; -use quote::quote; - -mod builder_def; -mod genesis_config_def; - -const DEFAULT_INSTANCE_NAME: &str = "__GeneratedInstance"; - -fn decl_genesis_config_and_impl_default( - scrate: &TokenStream, - genesis_config: &GenesisConfigDef, -) -> TokenStream { - let config_fields = genesis_config.fields.iter().map(|field| { - let (name, typ, attrs) = (&field.name, &field.typ, &field.attrs); - quote!( #( #[ #attrs] )* pub #name: #typ, ) - }); - - let config_field_defaults = genesis_config.fields.iter().map(|field| { - let (name, default) = (&field.name, &field.default); - quote!( #name: #default, ) - }); - - let serde_bug_bound = if !genesis_config.fields.is_empty() { - let mut b_ser = String::new(); - let mut b_dser = String::new(); - - for typ in genesis_config.fields.iter().map(|c| &c.typ) { - let typ = quote!( #typ ); - b_ser.push_str(&format!("{} : {}::serde::Serialize, ", typ, scrate)); - b_dser.push_str(&format!("{} : {}::serde::de::DeserializeOwned, ", typ, scrate)); - } - - quote! { - #[serde(bound(serialize = #b_ser))] - #[serde(bound(deserialize = #b_dser))] - } - } else { - quote!() - }; - - let genesis_struct_decl = &genesis_config.genesis_struct_decl; - let genesis_struct = &genesis_config.genesis_struct; - let genesis_impl = &genesis_config.genesis_impl; - let genesis_where_clause = &genesis_config.genesis_where_clause; - let serde_crate = format!("{}::serde", scrate); - - quote!( - /// Genesis config for the module, allow to build genesis storage. 
- #[derive(#scrate::Serialize, #scrate::Deserialize)] - #[cfg(feature = "std")] - #[serde(rename_all = "camelCase")] - #[serde(deny_unknown_fields)] - #[serde(crate = #serde_crate)] - #serde_bug_bound - pub struct GenesisConfig #genesis_struct_decl #genesis_where_clause { - #( #config_fields )* - } - - #[cfg(feature = "std")] - impl #genesis_impl Default for GenesisConfig #genesis_struct #genesis_where_clause { - fn default() -> Self { - GenesisConfig { - #( #config_field_defaults )* - } - } - } - ) -} - -fn impl_build_storage( - scrate: &TokenStream, - def: &DeclStorageDefExt, - genesis_config: &GenesisConfigDef, - builders: &BuilderDef, -) -> TokenStream { - let runtime_generic = &def.module_runtime_generic; - let runtime_trait = &def.module_runtime_trait; - let optional_instance = &def.optional_instance; - let optional_instance_bound = &def.optional_instance_bound; - let where_clause = &def.where_clause; - - let inherent_instance = def.optional_instance.clone().unwrap_or_else(|| { - let name = syn::Ident::new(DEFAULT_INSTANCE_NAME, Span::call_site()); - quote!( #name ) - }); - let inherent_instance_bound = quote!( - #inherent_instance: #scrate::traits::Instance - ); - - let build_storage_impl = quote!( - <#runtime_generic: #runtime_trait, #inherent_instance_bound> - ); - - let genesis_struct = &genesis_config.genesis_struct; - let genesis_impl = &genesis_config.genesis_impl; - let genesis_where_clause = &genesis_config.genesis_where_clause; - - let (fn_generic, fn_traitinstance, fn_where_clause) = - if !genesis_config.is_generic && builders.is_generic { - ( - quote!( <#runtime_generic: #runtime_trait, #optional_instance_bound> ), - quote!( #runtime_generic, #optional_instance ), - Some(&def.where_clause), - ) - } else { - (quote!(), quote!(), None) - }; - - let builder_blocks = &builders.blocks; - - let build_storage_impl_trait = quote!( - #scrate::sp_runtime::BuildModuleGenesisStorage<#runtime_generic, #inherent_instance> - ); - - quote! 
{ - #[cfg(feature = "std")] - impl #genesis_impl GenesisConfig #genesis_struct #genesis_where_clause { - /// Build the storage for this module. - pub fn build_storage #fn_generic (&self) -> std::result::Result< - #scrate::sp_runtime::Storage, - String - > #fn_where_clause { - let mut storage = Default::default(); - self.assimilate_storage::<#fn_traitinstance>(&mut storage)?; - Ok(storage) - } - - /// Assimilate the storage for this module into pre-existing overlays. - pub fn assimilate_storage #fn_generic ( - &self, - storage: &mut #scrate::sp_runtime::Storage, - ) -> std::result::Result<(), String> #fn_where_clause { - #scrate::BasicExternalities::execute_with_storage(storage, || { - #( #builder_blocks )* - Ok(()) - }) - } - } - - #[cfg(feature = "std")] - impl #build_storage_impl #build_storage_impl_trait for GenesisConfig #genesis_struct - #where_clause - { - fn build_module_genesis_storage( - &self, - storage: &mut #scrate::sp_runtime::Storage, - ) -> std::result::Result<(), String> { - self.assimilate_storage::<#fn_traitinstance> (storage) - } - } - } -} - -pub fn genesis_config_and_build_storage(def: &DeclStorageDefExt) -> TokenStream { - let scrate = &def.hidden_crate; - let builders = BuilderDef::from_def(scrate, def); - if !builders.blocks.is_empty() { - let genesis_config = match GenesisConfigDef::from_def(def) { - Ok(genesis_config) => genesis_config, - Err(err) => return err.to_compile_error(), - }; - let decl_genesis_config_and_impl_default = - decl_genesis_config_and_impl_default(scrate, &genesis_config); - let impl_build_storage = impl_build_storage(scrate, def, &genesis_config, &builders); - - quote! 
{ - #decl_genesis_config_and_impl_default - #impl_build_storage - } - } else { - quote!() - } -} diff --git a/frame/support/procedural/src/storage/getters.rs b/frame/support/procedural/src/storage/getters.rs deleted file mode 100644 index 428b2ec453407..0000000000000 --- a/frame/support/procedural/src/storage/getters.rs +++ /dev/null @@ -1,99 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of getters on module structure. - -use super::{DeclStorageDefExt, StorageLineTypeDef}; -use proc_macro2::TokenStream; -use quote::quote; - -pub fn impl_getters(def: &DeclStorageDefExt) -> TokenStream { - let scrate = &def.hidden_crate; - let mut getters = TokenStream::new(); - - for (get_fn, line) in def - .storage_lines - .iter() - .filter_map(|line| line.getter.as_ref().map(|get_fn| (get_fn, line))) - { - let attrs = &line.doc_attrs; - - let storage_struct = &line.storage_struct; - let storage_trait = &line.storage_trait; - - let getter = match &line.storage_type { - StorageLineTypeDef::Simple(value) => { - quote! { - #( #[ #attrs ] )* - pub fn #get_fn() -> #value { - <#storage_struct as #scrate::#storage_trait>::get() - } - } - }, - StorageLineTypeDef::Map(map) => { - let key = &map.key; - let value = &map.value; - quote! 
{ - #( #[ #attrs ] )* - pub fn #get_fn>(key: K) -> #value { - <#storage_struct as #scrate::#storage_trait>::get(key) - } - } - }, - StorageLineTypeDef::DoubleMap(map) => { - let key1 = &map.key1; - let key2 = &map.key2; - let value = &map.value; - quote! { - pub fn #get_fn(k1: KArg1, k2: KArg2) -> #value - where - KArg1: #scrate::codec::EncodeLike<#key1>, - KArg2: #scrate::codec::EncodeLike<#key2>, - { - <#storage_struct as #scrate::#storage_trait>::get(k1, k2) - } - } - }, - StorageLineTypeDef::NMap(map) => { - let keygen = map.to_keygen_struct(&def.hidden_crate); - let value = &map.value; - quote! { - pub fn #get_fn(key: KArg) -> #value - where - KArg: #scrate::storage::types::EncodeLikeTuple< - <#keygen as #scrate::storage::types::KeyGenerator>::KArg - > - + #scrate::storage::types::TupleToEncodedIter, - { - <#storage_struct as #scrate::#storage_trait>::get(key) - } - } - }, - }; - getters.extend(getter); - } - - let module_struct = &def.module_struct; - let module_impl = &def.module_impl; - let where_clause = &def.where_clause; - - quote!( - impl #module_impl #module_struct #where_clause { - #getters - } - ) -} diff --git a/frame/support/procedural/src/storage/instance_trait.rs b/frame/support/procedural/src/storage/instance_trait.rs deleted file mode 100644 index 8b5aa500550ef..0000000000000 --- a/frame/support/procedural/src/storage/instance_trait.rs +++ /dev/null @@ -1,139 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of the trait instance and the instance structures implementing it. -//! (For not instantiable traits there is still the inherent instance implemented). - -use super::DeclStorageDefExt; -use crate::NUMBER_OF_INSTANCE; -use proc_macro2::{Span, TokenStream}; -use quote::quote; - -pub(crate) const INHERENT_INSTANCE_NAME: &str = "__InherentHiddenInstance"; - -// Used to generate an instance implementation. -struct InstanceDef { - prefix: String, - instance_struct: syn::Ident, - doc: TokenStream, - // Index is same as instance number. Default is 0. - index: u8, -} - -pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { - let scrate = &def.hidden_crate; - let mut impls = TokenStream::new(); - - impls.extend(reexport_instance_trait(scrate, def)); - - // Implementation of instances. - if let Some(module_instance) = &def.module_instance { - let instance_defs = (1..=NUMBER_OF_INSTANCE) - .map(|i| { - let name = format!("Instance{}", i); - InstanceDef { - instance_struct: syn::Ident::new(&name, proc_macro2::Span::call_site()), - prefix: name, - doc: quote!(#[doc=r"Module instance"]), - index: i, - } - }) - .chain(module_instance.instance_default.as_ref().map(|ident| InstanceDef { - prefix: String::new(), - instance_struct: ident.clone(), - doc: quote!(#[doc=r"Default module instance"]), - index: 0, - })); - - for instance_def in instance_defs { - impls.extend(create_and_impl_instance_struct(scrate, &instance_def, def)); - } - } - - // The name of the inherently available instance. - let inherent_instance = syn::Ident::new(INHERENT_INSTANCE_NAME, Span::call_site()); - - // Implementation of inherent instance. - if let Some(default_instance) = - def.module_instance.as_ref().and_then(|i| i.instance_default.as_ref()) - { - impls.extend(quote! { - /// Hidden instance generated to be internally used when module is used without - /// instance. 
- #[doc(hidden)] - pub type #inherent_instance = #default_instance; - }); - } else { - let instance_def = InstanceDef { - prefix: String::new(), - instance_struct: inherent_instance, - doc: quote!( - /// Hidden instance generated to be internally used when module is used without - /// instance. - #[doc(hidden)] - ), - // This is just to make the type system happy. Not actually used. - index: 0, - }; - impls.extend(create_and_impl_instance_struct(scrate, &instance_def, def)); - } - - impls -} - -fn reexport_instance_trait(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStream { - if let Some(i) = def.module_instance.as_ref() { - let instance_trait = &i.instance_trait; - quote!( - /// Local import of frame_support::traits::Instance - // This import is not strictly needed but made in order not to have breaking change. - use #scrate::traits::Instance as #instance_trait; - ) - } else { - quote!() - } -} - -fn create_and_impl_instance_struct( - scrate: &TokenStream, - instance_def: &InstanceDef, - def: &DeclStorageDefExt, -) -> TokenStream { - let instance_trait = quote!( #scrate::traits::Instance ); - - let instance_struct = &instance_def.instance_struct; - let prefix = format!("{}{}", instance_def.prefix, def.crate_name); - let doc = &instance_def.doc; - let index = instance_def.index; - - quote! { - // Those trait are derived because of wrong bounds for generics - #[derive( - Clone, Eq, PartialEq, - #scrate::codec::Encode, - #scrate::codec::Decode, - #scrate::scale_info::TypeInfo, - #scrate::RuntimeDebug, - )] - #doc - pub struct #instance_struct; - impl #instance_trait for #instance_struct { - const PREFIX: &'static str = #prefix; - const INDEX: u8 = #index; - } - } -} diff --git a/frame/support/procedural/src/storage/metadata.rs b/frame/support/procedural/src/storage/metadata.rs deleted file mode 100644 index 5561d0564597b..0000000000000 --- a/frame/support/procedural/src/storage/metadata.rs +++ /dev/null @@ -1,217 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of `storage_metadata` on module structure, used by construct_runtime. - -use super::{DeclStorageDefExt, StorageLineDefExt, StorageLineTypeDef}; -use frame_support_procedural_tools::get_doc_literals; -use proc_macro2::TokenStream; -use quote::quote; - -fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> TokenStream { - let value_type = &line.value_type; - match &line.storage_type { - StorageLineTypeDef::Simple(_) => { - quote! { - #scrate::metadata_ir::StorageEntryTypeIR::Plain( - #scrate::scale_info::meta_type::<#value_type>() - ) - } - }, - StorageLineTypeDef::Map(map) => { - let hasher = map.hasher.into_metadata(); - let key = &map.key; - quote! { - #scrate::metadata_ir::StorageEntryTypeIR::Map { - hashers: #scrate::sp_std::vec! [ #scrate::metadata_ir::#hasher ], - key: #scrate::scale_info::meta_type::<#key>(), - value: #scrate::scale_info::meta_type::<#value_type>(), - } - } - }, - StorageLineTypeDef::DoubleMap(map) => { - let hasher1 = map.hasher1.into_metadata(); - let hasher2 = map.hasher2.into_metadata(); - let key1 = &map.key1; - let key2 = &map.key2; - quote! { - #scrate::metadata_ir::StorageEntryTypeIR::Map { - hashers: #scrate::sp_std::vec! 
[ - #scrate::metadata_ir::#hasher1, - #scrate::metadata_ir::#hasher2, - ], - key: #scrate::scale_info::meta_type::<(#key1, #key2)>(), - value: #scrate::scale_info::meta_type::<#value_type>(), - } - } - }, - StorageLineTypeDef::NMap(map) => { - let key_tuple = &map.to_key_tuple(); - let hashers = map - .hashers - .iter() - .map(|hasher| hasher.to_storage_hasher_struct()) - .collect::>(); - quote! { - #scrate::metadata_ir::StorageEntryTypeIR::Map { - hashers: #scrate::sp_std::vec! [ - #( #scrate::metadata_ir::StorageHasherIR::#hashers, )* - ], - key: #scrate::scale_info::meta_type::<#key_tuple>(), - value: #scrate::scale_info::meta_type::<#value_type>(), - } - } - }, - } -} - -fn default_byte_getter( - scrate: &TokenStream, - line: &StorageLineDefExt, - def: &DeclStorageDefExt, -) -> (TokenStream, TokenStream) { - let default = line - .default_value - .as_ref() - .map(|d| quote!( #d )) - .unwrap_or_else(|| quote!(Default::default())); - - let str_name = line.name.to_string(); - let struct_name = - syn::Ident::new(&("__GetByteStruct".to_string() + &str_name), line.name.span()); - let cache_name = - syn::Ident::new(&("__CACHE_GET_BYTE_STRUCT_".to_string() + &str_name), line.name.span()); - - let runtime_generic = &def.module_runtime_generic; - let runtime_trait = &def.module_runtime_trait; - let optional_instance_bound_optional_default = &def.optional_instance_bound_optional_default; - let optional_instance_bound = &def.optional_instance_bound; - let optional_instance = &def.optional_instance; - let optional_comma_instance = optional_instance.as_ref().map(|i| quote!(, #i)); - let where_clause = &def.where_clause; - - let query_type = &line.query_type; - - let struct_def = quote! 
{ - #[doc(hidden)] - pub struct #struct_name< - #runtime_generic, #optional_instance_bound_optional_default - >(pub #scrate::sp_std::marker::PhantomData<(#runtime_generic #optional_comma_instance)>); - - #[cfg(feature = "std")] - #[allow(non_upper_case_globals)] - static #cache_name: #scrate::once_cell::sync::OnceCell<#scrate::sp_std::vec::Vec> = - #scrate::once_cell::sync::OnceCell::new(); - - #[cfg(feature = "std")] - impl<#runtime_generic: #runtime_trait, #optional_instance_bound> - #struct_name<#runtime_generic, #optional_instance> - #where_clause - { - fn default_byte(&self) -> #scrate::sp_std::vec::Vec { - use #scrate::codec::Encode; - #cache_name.get_or_init(|| { - let def_val: #query_type = #default; - <#query_type as Encode>::encode(&def_val) - }).clone() - } - } - - #[cfg(not(feature = "std"))] - impl<#runtime_generic: #runtime_trait, #optional_instance_bound> - #struct_name<#runtime_generic, #optional_instance> - #where_clause - { - fn default_byte(&self) -> #scrate::sp_std::vec::Vec { - use #scrate::codec::Encode; - let def_val: #query_type = #default; - <#query_type as Encode>::encode(&def_val) - } - } - }; - let struct_instance = quote!( - #struct_name::<#runtime_generic, #optional_instance>(#scrate::sp_std::marker::PhantomData) - ); - - (struct_def, struct_instance) -} - -pub fn impl_metadata(def: &DeclStorageDefExt) -> TokenStream { - let scrate = &def.hidden_crate; - let mut entries = TokenStream::new(); - let mut default_byte_getter_struct_defs = TokenStream::new(); - - for line in def.storage_lines.iter() { - let str_name = line.name.to_string(); - - let modifier = if line.is_option { - quote!(#scrate::metadata_ir::StorageEntryModifierIR::Optional) - } else { - quote!(#scrate::metadata_ir::StorageEntryModifierIR::Default) - }; - - let ty = storage_line_metadata_type(scrate, line); - - let (default_byte_getter_struct_def, default_byte_getter_struct_instance) = - default_byte_getter(scrate, line, def); - - let docs = get_doc_literals(&line.attrs); 
- - let entry = quote! { - #scrate::metadata_ir::StorageEntryMetadataIR { - name: #str_name, - modifier: #modifier, - ty: #ty, - default: #default_byte_getter_struct_instance.default_byte(), - docs: #scrate::sp_std::vec![ #( #docs ),* ], - }, - }; - - default_byte_getter_struct_defs.extend(default_byte_getter_struct_def); - entries.extend(entry); - } - - let prefix = if let Some(instance) = &def.module_instance { - let instance_generic = &instance.instance_generic; - quote!(#instance_generic::PREFIX) - } else { - let prefix = def.crate_name.to_string(); - quote!(#prefix) - }; - - let store_metadata = quote!( - #scrate::metadata_ir::PalletStorageMetadataIR { - prefix: #prefix, - entries: #scrate::sp_std::vec![ #entries ], - } - ); - - let module_struct = &def.module_struct; - let module_impl = &def.module_impl; - let where_clause = &def.where_clause; - - quote!( - #default_byte_getter_struct_defs - - impl #module_impl #module_struct #where_clause { - #[doc(hidden)] - pub fn storage_metadata() -> #scrate::metadata_ir::PalletStorageMetadataIR { - #store_metadata - } - } - ) -} diff --git a/frame/support/procedural/src/storage/mod.rs b/frame/support/procedural/src/storage/mod.rs deleted file mode 100644 index c04862256a7bc..0000000000000 --- a/frame/support/procedural/src/storage/mod.rs +++ /dev/null @@ -1,507 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! `decl_storage` input definition and expansion. - -mod genesis_config; -mod getters; -mod instance_trait; -mod metadata; -mod parse; -mod print_pallet_upgrade; -mod storage_info; -mod storage_struct; -mod store_trait; - -pub(crate) use instance_trait::INHERENT_INSTANCE_NAME; - -use frame_support_procedural_tools::{ - generate_crate_access, generate_hidden_includes, syn_ext as ext, -}; - -use quote::quote; - -/// All information contained in input of decl_storage -pub struct DeclStorageDef { - /// Whether to generate the storage info - generate_storage_info: bool, - /// Name of the module used to import hidden imports. - hidden_crate: Option, - /// Visibility of store trait. - visibility: syn::Visibility, - /// Name of store trait: usually `Store`. - store_trait: syn::Ident, - /// Module name used by construct_runtime: usually `Module`. - module_name: syn::Ident, - /// Usually `T`. - module_runtime_generic: syn::Ident, - /// Usually `Config` - module_runtime_trait: syn::Path, - /// For instantiable module: usually `I: Instance=DefaultInstance`. - module_instance: Option, - /// Where claused used to constrain T and I even more. - where_clause: Option, - /// The extra build function used to build storage at genesis. - extra_genesis_build: Option, - /// The extra genesis config fields. - extra_genesis_config_lines: Vec, - /// Definition of storages. - storage_lines: Vec, - /// Name of the crate, used for storage prefixes. - crate_name: syn::Ident, -} - -impl syn::parse::Parse for DeclStorageDef { - fn parse(input: syn::parse::ParseStream) -> syn::Result { - parse::parse(input) - } -} - -/// Extended version of `DeclStorageDef` with useful precomputed value. -pub struct DeclStorageDefExt { - /// Whether to generate the storage info - generate_storage_info: bool, - /// Name of the module used to import hidden imports. 
- hidden_crate: proc_macro2::TokenStream, - /// Hidden imports used by the module. - hidden_imports: proc_macro2::TokenStream, - /// Visibility of store trait. - visibility: syn::Visibility, - /// Name of store trait: usually `Store`. - store_trait: syn::Ident, - /// Module name used by construct_runtime: usually `Module`. - #[allow(unused)] - module_name: syn::Ident, - /// Usually `T`. - module_runtime_generic: syn::Ident, - /// Usually `Config`. - module_runtime_trait: syn::Path, - /// For instantiable module: usually `I: Instance=DefaultInstance`. - module_instance: Option, - /// Where claused used to constrain T and I even more. - where_clause: Option, - /// The extra build function used to build storage at genesis. - extra_genesis_build: Option, - /// The extra genesis config fields. - extra_genesis_config_lines: Vec, - /// Definition of storages. - storage_lines: Vec, - /// Name of the crate, used for storage prefixes. - crate_name: syn::Ident, - /// Full struct expansion: `Module`. - module_struct: proc_macro2::TokenStream, - /// Impl block for module: ``. - module_impl: proc_macro2::TokenStream, - /// For instantiable: `I`. - optional_instance: Option, - /// For instantiable: `I: Instance`. - optional_instance_bound: Option, - /// For instantiable: `I: Instance = DefaultInstance`. 
- optional_instance_bound_optional_default: Option, -} - -impl From for DeclStorageDefExt { - fn from(mut def: DeclStorageDef) -> Self { - let hidden_crate_name = def - .hidden_crate - .as_ref() - .map(|i| i.to_string()) - .unwrap_or_else(|| "decl_storage".to_string()); - - let hidden_crate = generate_crate_access(&hidden_crate_name, "frame-support"); - let hidden_imports = generate_hidden_includes(&hidden_crate_name, "frame-support"); - - let storage_lines = def.storage_lines.drain(..).collect::>(); - let storage_lines = storage_lines - .into_iter() - .map(|line| StorageLineDefExt::from_def(line, &def, &hidden_crate)) - .collect(); - - let (optional_instance, optional_instance_bound, optional_instance_bound_optional_default) = - if let Some(instance) = def.module_instance.as_ref() { - let instance_generic = &instance.instance_generic; - let instance_trait = &instance.instance_trait; - let optional_equal_instance_default = - instance.instance_default.as_ref().map(|d| quote!( = #d )); - ( - Some(quote!(#instance_generic)), - Some(quote!(#instance_generic: #instance_trait)), - Some( - quote!(#instance_generic: #instance_trait #optional_equal_instance_default), - ), - ) - } else { - (None, None, None) - }; - - let module_runtime_generic = &def.module_runtime_generic; - let module_runtime_trait = &def.module_runtime_trait; - let module_name = &def.module_name; - - let module_struct = quote!( - #module_name<#module_runtime_generic, #optional_instance> - ); - - let module_impl = quote!( - <#module_runtime_generic: #module_runtime_trait + 'static, #optional_instance_bound> - ); - - Self { - hidden_crate, - hidden_imports, - generate_storage_info: def.generate_storage_info, - visibility: def.visibility, - store_trait: def.store_trait, - module_name: def.module_name, - module_runtime_generic: def.module_runtime_generic, - module_runtime_trait: def.module_runtime_trait, - module_instance: def.module_instance, - where_clause: def.where_clause, - extra_genesis_build: 
def.extra_genesis_build, - extra_genesis_config_lines: def.extra_genesis_config_lines, - crate_name: def.crate_name, - storage_lines, - module_struct, - module_impl, - optional_instance, - optional_instance_bound, - optional_instance_bound_optional_default, - } - } -} - -/// Usually `I: Instance=DefaultInstance`. -pub struct ModuleInstanceDef { - /// Usually: `I`. - instance_generic: syn::Ident, - /// Usually: `Instance`. - instance_trait: syn::Ident, - /// Usually: `DefaultInstance`. - instance_default: Option, -} - -pub struct StorageLineDef { - attrs: Vec, - /// Visibility of the storage struct. - visibility: syn::Visibility, - name: syn::Ident, - /// The name of getter function to be implemented on Module struct. - getter: Option, - /// The name of the field to be used in genesis config if any. - config: Option, - /// The given max values with `max_values` attribute, or a none if not specified. - max_values: Option, - /// The build function of the storage if any. - build: Option, - /// Default value of genesis config field and also for storage when no value available. - default_value: Option, - storage_type: StorageLineTypeDef, -} - -pub struct StorageLineDefExt { - #[allow(unused)] - attrs: Vec, - /// Visibility of the storage struct. - visibility: syn::Visibility, - name: syn::Ident, - /// The name of getter function to be implemented on Module struct. - getter: Option, - /// The name of the field to be used in genesis config if any. - config: Option, - /// The given max values with `max_values` attribute, or a none if not specified. - max_values: Option, - /// The build function of the storage if any. - build: Option, - /// Default value of genesis config field and also for storage when no value available. - default_value: Option, - storage_type: StorageLineTypeDef, - doc_attrs: Vec, - /// Either the type stored in storage or wrapped in an Option. - query_type: syn::Type, - /// The type stored in storage. 
- value_type: syn::Type, - /// Full struct, for example: `StorageName`. - storage_struct: proc_macro2::TokenStream, - /// If storage is generic over runtime then `T`. - optional_storage_runtime_comma: Option, - /// If storage is generic over runtime then `T: Config`. - optional_storage_runtime_bound_comma: Option, - /// The where clause to use to constrain generics if storage is generic over runtime. - optional_storage_where_clause: Option, - /// Full trait, for example: `storage::StorageMap`. - storage_trait: proc_macro2::TokenStream, - /// Full trait, for example: `storage::generator::StorageMap`. - storage_generator_trait: proc_macro2::TokenStream, - /// Whether the storage is generic. - is_generic: bool, - /// Whether the storage value is an option. - is_option: bool, -} - -impl StorageLineDefExt { - fn from_def( - storage_def: StorageLineDef, - def: &DeclStorageDef, - hidden_crate: &proc_macro2::TokenStream, - ) -> Self { - let is_generic = match &storage_def.storage_type { - StorageLineTypeDef::Simple(value) => - ext::type_contains_ident(value, &def.module_runtime_generic), - StorageLineTypeDef::Map(map) => - ext::type_contains_ident(&map.key, &def.module_runtime_generic) || - ext::type_contains_ident(&map.value, &def.module_runtime_generic), - StorageLineTypeDef::DoubleMap(map) => - ext::type_contains_ident(&map.key1, &def.module_runtime_generic) || - ext::type_contains_ident(&map.key2, &def.module_runtime_generic) || - ext::type_contains_ident(&map.value, &def.module_runtime_generic), - StorageLineTypeDef::NMap(map) => - map.keys - .iter() - .any(|key| ext::type_contains_ident(key, &def.module_runtime_generic)) || - ext::type_contains_ident(&map.value, &def.module_runtime_generic), - }; - - let query_type = match &storage_def.storage_type { - StorageLineTypeDef::Simple(value) => value.clone(), - StorageLineTypeDef::Map(map) => map.value.clone(), - StorageLineTypeDef::DoubleMap(map) => map.value.clone(), - StorageLineTypeDef::NMap(map) => map.value.clone(), 
- }; - let is_option = ext::extract_type_option(&query_type).is_some(); - let value_type = - ext::extract_type_option(&query_type).unwrap_or_else(|| query_type.clone()); - - let module_runtime_generic = &def.module_runtime_generic; - let module_runtime_trait = &def.module_runtime_trait; - let optional_storage_runtime_comma = - if is_generic { Some(quote!( #module_runtime_generic, )) } else { None }; - let optional_storage_runtime_bound_comma = if is_generic { - Some(quote!( #module_runtime_generic: #module_runtime_trait, )) - } else { - None - }; - - let storage_name = &storage_def.name; - let optional_instance_generic = def.module_instance.as_ref().map(|i| { - let instance_generic = &i.instance_generic; - quote!( #instance_generic ) - }); - let storage_struct = quote!( - #storage_name<#optional_storage_runtime_comma #optional_instance_generic> - ); - - let optional_storage_where_clause = - if is_generic { def.where_clause.as_ref().map(|w| quote!( #w )) } else { None }; - - let storage_trait_truncated = match &storage_def.storage_type { - StorageLineTypeDef::Simple(_) => { - quote!( StorageValue<#value_type> ) - }, - StorageLineTypeDef::Map(map) => { - let key = &map.key; - quote!( StorageMap<#key, #value_type> ) - }, - StorageLineTypeDef::DoubleMap(map) => { - let key1 = &map.key1; - let key2 = &map.key2; - quote!( StorageDoubleMap<#key1, #key2, #value_type> ) - }, - StorageLineTypeDef::NMap(map) => { - let keygen = map.to_keygen_struct(hidden_crate); - quote!( StorageNMap<#keygen, #value_type> ) - }, - }; - - let storage_trait = quote!( storage::#storage_trait_truncated ); - let storage_generator_trait = quote!( storage::generator::#storage_trait_truncated ); - - let doc_attrs = storage_def - .attrs - .iter() - .filter(|a| a.meta.path().is_ident("doc")) - .map(|a| a.meta.clone()) - .collect(); - - Self { - attrs: storage_def.attrs, - visibility: storage_def.visibility, - name: storage_def.name, - getter: storage_def.getter, - config: storage_def.config, - 
max_values: storage_def.max_values, - build: storage_def.build, - default_value: storage_def.default_value, - storage_type: storage_def.storage_type, - doc_attrs, - query_type, - value_type, - storage_struct, - optional_storage_runtime_comma, - optional_storage_runtime_bound_comma, - optional_storage_where_clause, - storage_trait, - storage_generator_trait, - is_generic, - is_option, - } - } -} - -pub enum StorageLineTypeDef { - Map(MapDef), - DoubleMap(Box), - NMap(NMapDef), - Simple(syn::Type), -} - -pub struct MapDef { - pub hasher: HasherKind, - pub key: syn::Type, - /// This is the query value not the inner value used in storage trait implementation. - pub value: syn::Type, -} - -pub struct DoubleMapDef { - pub hasher1: HasherKind, - pub hasher2: HasherKind, - pub key1: syn::Type, - pub key2: syn::Type, - /// This is the query value not the inner value used in storage trait implementation. - pub value: syn::Type, -} - -pub struct NMapDef { - pub hashers: Vec, - pub keys: Vec, - pub value: syn::Type, -} - -impl NMapDef { - fn to_keygen_struct(&self, scrate: &proc_macro2::TokenStream) -> proc_macro2::TokenStream { - if self.keys.len() == 1 { - let hasher = &self.hashers[0].to_storage_hasher_struct(); - let key = &self.keys[0]; - return quote!( #scrate::storage::types::Key<#scrate::#hasher, #key> ) - } - - let key_hasher = self - .keys - .iter() - .zip(&self.hashers) - .map(|(key, hasher)| { - let hasher = hasher.to_storage_hasher_struct(); - quote!( #scrate::storage::types::Key<#scrate::#hasher, #key> ) - }) - .collect::>(); - quote!(( #(#key_hasher,)* )) - } - - fn to_key_tuple(&self) -> proc_macro2::TokenStream { - if self.keys.len() == 1 { - let key = &self.keys[0]; - return quote!(#key) - } - - let tuple = self.keys.iter().map(|key| quote!(#key)).collect::>(); - quote!(( #(#tuple,)* )) - } -} - -pub struct ExtraGenesisLineDef { - attrs: Vec, - name: syn::Ident, - typ: syn::Type, - default: Option, -} - -#[derive(Debug, Clone)] -pub enum HasherKind { - 
Blake2_256, - Blake2_128, - Blake2_128Concat, - Twox256, - Twox128, - Twox64Concat, - Identity, -} - -impl HasherKind { - fn to_storage_hasher_struct(&self) -> proc_macro2::TokenStream { - match self { - HasherKind::Blake2_256 => quote!(Blake2_256), - HasherKind::Blake2_128 => quote!(Blake2_128), - HasherKind::Blake2_128Concat => quote!(Blake2_128Concat), - HasherKind::Twox256 => quote!(Twox256), - HasherKind::Twox128 => quote!(Twox128), - HasherKind::Twox64Concat => quote!(Twox64Concat), - HasherKind::Identity => quote!(Identity), - } - } - - fn into_metadata(&self) -> proc_macro2::TokenStream { - match self { - HasherKind::Blake2_256 => quote!(StorageHasherIR::Blake2_256), - HasherKind::Blake2_128 => quote!(StorageHasherIR::Blake2_128), - HasherKind::Blake2_128Concat => quote!(StorageHasherIR::Blake2_128Concat), - HasherKind::Twox256 => quote!(StorageHasherIR::Twox256), - HasherKind::Twox128 => quote!(StorageHasherIR::Twox128), - HasherKind::Twox64Concat => quote!(StorageHasherIR::Twox64Concat), - HasherKind::Identity => quote!(StorageHasherIR::Identity), - } - } -} - -/// Full implementation of decl_storage. 
-pub fn decl_storage_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let def = syn::parse_macro_input!(input as DeclStorageDef); - let def_ext = DeclStorageDefExt::from(def); - - print_pallet_upgrade::maybe_print_pallet_upgrade(&def_ext); - - let scrate = &def_ext.hidden_crate; - let scrate_decl = &def_ext.hidden_imports; - let store_trait = store_trait::decl_and_impl(&def_ext); - let getters = getters::impl_getters(&def_ext); - let metadata = metadata::impl_metadata(&def_ext); - let instance_trait = instance_trait::decl_and_impl(&def_ext); - let genesis_config = genesis_config::genesis_config_and_build_storage(&def_ext); - let storage_struct = storage_struct::decl_and_impl(&def_ext); - let storage_info = storage_info::impl_storage_info(&def_ext); - - quote!( - use #scrate::{ - StorageValue as _, - StorageMap as _, - StorageDoubleMap as _, - StorageNMap as _, - StoragePrefixedMap as _, - IterableStorageMap as _, - IterableStorageNMap as _, - IterableStorageDoubleMap as _, - }; - - #scrate_decl - #store_trait - #getters - #metadata - #instance_trait - #genesis_config - #storage_struct - #storage_info - ) - .into() -} diff --git a/frame/support/procedural/src/storage/parse.rs b/frame/support/procedural/src/storage/parse.rs deleted file mode 100644 index f9f62e0ff3123..0000000000000 --- a/frame/support/procedural/src/storage/parse.rs +++ /dev/null @@ -1,562 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Parsing of decl_storage input. - -use frame_support_procedural_tools::{syn_ext as ext, Parse, ToTokens}; -use syn::{spanned::Spanned, Ident, Token}; - -mod keyword { - syn::custom_keyword!(generate_storage_info); - syn::custom_keyword!(hiddencrate); - syn::custom_keyword!(add_extra_genesis); - syn::custom_keyword!(extra_genesis_skip_phantom_data_field); - syn::custom_keyword!(config); - syn::custom_keyword!(max_values); - syn::custom_keyword!(build); - syn::custom_keyword!(get); - syn::custom_keyword!(map); - syn::custom_keyword!(double_map); - syn::custom_keyword!(nmap); - syn::custom_keyword!(opaque_blake2_256); - syn::custom_keyword!(opaque_blake2_128); - syn::custom_keyword!(blake2_128_concat); - syn::custom_keyword!(opaque_twox_256); - syn::custom_keyword!(opaque_twox_128); - syn::custom_keyword!(twox_64_concat); - syn::custom_keyword!(identity); - syn::custom_keyword!(hasher); - syn::custom_keyword!(tainted); - syn::custom_keyword!(natural); - syn::custom_keyword!(prehashed); -} - -/// Specific `Opt` to implement structure with optional parsing -#[derive(Debug, Clone)] -pub struct Opt

{ - pub inner: Option

, -} -impl quote::ToTokens for Opt

{ - fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { - if let Some(ref p) = self.inner { - p.to_tokens(tokens); - } - } -} - -macro_rules! impl_parse_for_opt { - ($struct:ident => $token:path) => { - impl syn::parse::Parse for Opt<$struct> { - fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { - if input.peek($token) { - input.parse().map(|p| Opt { inner: Some(p) }) - } else { - Ok(Opt { inner: None }) - } - } - } - }; -} - -/// Parsing usage only -#[derive(Parse, ToTokens, Debug)] -struct StorageDefinition { - pub generate_storage_info: Opt, - pub hidden_crate: Opt, - pub visibility: syn::Visibility, - pub trait_token: Token![trait], - pub ident: Ident, - pub for_token: Token![for], - pub module_ident: Ident, - pub mod_lt_token: Token![<], - pub mod_param_generic: syn::Ident, - pub mod_param_bound_token: Option, - pub mod_param_bound: syn::Path, - pub mod_instance_param_token: Option, - pub mod_instance: Option, - pub mod_instantiable_token: Option, - pub mod_instantiable: Option, - pub mod_default_instance_token: Option, - pub mod_default_instance: Option, - pub mod_gt_token: Token![>], - pub as_token: Token![as], - pub crate_ident: Ident, - pub where_clause: Option, - pub content: ext::Braces>, - pub extra_genesis: Opt, -} - -#[derive(Parse, ToTokens, Debug)] -struct GenerateStorageInfo { - pub keyword: keyword::generate_storage_info, -} -impl_parse_for_opt!(GenerateStorageInfo => keyword::generate_storage_info); - -#[derive(Parse, ToTokens, Debug)] -struct SpecificHiddenCrate { - pub keyword: keyword::hiddencrate, - pub ident: ext::Parens, -} -impl_parse_for_opt!(SpecificHiddenCrate => keyword::hiddencrate); - -#[derive(Parse, ToTokens, Debug)] -struct AddExtraGenesis { - pub extragenesis_keyword: keyword::add_extra_genesis, - pub content: ext::Braces, -} - -impl_parse_for_opt!(AddExtraGenesis => keyword::add_extra_genesis); - -#[derive(Parse, ToTokens, Debug)] -struct AddExtraGenesisContent { - pub lines: ext::Punctuated, -} - 
-#[derive(ToTokens, Debug)] -enum AddExtraGenesisLineEnum { - AddExtraGenesisLine(AddExtraGenesisLine), - AddExtraGenesisBuild(DeclStorageBuild), -} - -impl syn::parse::Parse for AddExtraGenesisLineEnum { - fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { - let input_fork = input.fork(); - // OuterAttributes are forbidden for build variant, - // However to have better documentation we match against the keyword after those attributes. - let _: ext::OuterAttributes = input_fork.parse()?; - let lookahead = input_fork.lookahead1(); - if lookahead.peek(keyword::build) { - Ok(Self::AddExtraGenesisBuild(input.parse()?)) - } else if lookahead.peek(keyword::config) { - Ok(Self::AddExtraGenesisLine(input.parse()?)) - } else { - Err(lookahead.error()) - } - } -} - -#[derive(Parse, ToTokens, Debug)] -struct AddExtraGenesisLine { - pub attrs: ext::OuterAttributes, - pub config_keyword: keyword::config, - pub extra_field: ext::Parens, - pub coldot_token: Token![:], - pub extra_type: syn::Type, - pub default_value: Opt, -} - -#[derive(Parse, ToTokens, Debug)] -struct DeclStorageLine { - // attrs (main use case is doc) - pub attrs: ext::OuterAttributes, - // visibility (no need to make optional - pub visibility: syn::Visibility, - // name - pub name: Ident, - pub getter: Opt, - pub config: Opt, - pub max_values: Opt, - pub build: Opt, - pub coldot_token: Token![:], - pub storage_type: DeclStorageType, - pub default_value: Opt, -} - -#[derive(Parse, ToTokens, Debug)] -struct DeclStorageGetterBody { - fn_keyword: Token![fn], - ident: Ident, -} - -#[derive(Parse, ToTokens, Debug)] -struct DeclStorageGetter { - pub getter_keyword: keyword::get, - pub getfn: ext::Parens, -} - -impl_parse_for_opt!(DeclStorageGetter => keyword::get); - -#[derive(Parse, ToTokens, Debug)] -struct DeclStorageConfig { - pub config_keyword: keyword::config, - pub expr: ext::Parens>, -} - -impl_parse_for_opt!(DeclStorageConfig => keyword::config); - -#[derive(Parse, ToTokens, Debug)] -struct 
DeclStorageMaxValues { - pub max_values_keyword: keyword::max_values, - pub expr: ext::Parens, -} -impl_parse_for_opt!(DeclStorageMaxValues => keyword::max_values); - -#[derive(Parse, ToTokens, Debug)] -struct DeclStorageBuild { - pub build_keyword: keyword::build, - pub expr: ext::Parens, -} - -impl_parse_for_opt!(DeclStorageBuild => keyword::build); - -#[derive(ToTokens, Debug)] -enum DeclStorageType { - Map(DeclStorageMap), - DoubleMap(Box), - NMap(DeclStorageNMap), - Simple(syn::Type), -} - -impl syn::parse::Parse for DeclStorageType { - fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { - if input.peek(keyword::map) { - Ok(Self::Map(input.parse()?)) - } else if input.peek(keyword::double_map) { - Ok(Self::DoubleMap(input.parse()?)) - } else if input.peek(keyword::nmap) { - Ok(Self::NMap(input.parse()?)) - } else { - Ok(Self::Simple(input.parse()?)) - } - } -} - -#[derive(Parse, ToTokens, Debug)] -struct DeclStorageMap { - pub map_keyword: keyword::map, - pub hasher: Opt, - pub key: syn::Type, - pub ass_keyword: Token![=>], - pub value: syn::Type, -} - -#[derive(Parse, ToTokens, Debug)] -struct DeclStorageDoubleMap { - pub map_keyword: keyword::double_map, - pub hasher1: Opt, - pub key1: syn::Type, - pub comma_keyword: Token![,], - pub hasher2: Opt, - pub key2: syn::Type, - pub ass_keyword: Token![=>], - pub value: syn::Type, -} - -#[derive(Parse, ToTokens, Debug)] -struct DeclStorageKey { - pub hasher: Opt, - pub key: syn::Type, -} - -#[derive(Parse, ToTokens, Debug)] -struct DeclStorageNMap { - pub map_keyword: keyword::nmap, - pub storage_keys: ext::PunctuatedTrailing, - pub ass_keyword: Token![=>], - pub value: syn::Type, -} - -#[derive(Clone, ToTokens, Debug)] -enum Hasher { - Blake2_256(keyword::opaque_blake2_256), - Blake2_128(keyword::opaque_blake2_128), - Blake2_128Concat(keyword::blake2_128_concat), - Twox256(keyword::opaque_twox_256), - Twox128(keyword::opaque_twox_128), - Twox64Concat(keyword::twox_64_concat), - 
Identity(keyword::identity), -} - -impl syn::parse::Parse for Hasher { - fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { - let lookahead = input.lookahead1(); - if lookahead.peek(keyword::opaque_blake2_256) { - Ok(Self::Blake2_256(input.parse()?)) - } else if lookahead.peek(keyword::opaque_blake2_128) { - Ok(Self::Blake2_128(input.parse()?)) - } else if lookahead.peek(keyword::blake2_128_concat) { - Ok(Self::Blake2_128Concat(input.parse()?)) - } else if lookahead.peek(keyword::opaque_twox_256) { - Ok(Self::Twox256(input.parse()?)) - } else if lookahead.peek(keyword::opaque_twox_128) { - Ok(Self::Twox128(input.parse()?)) - } else if lookahead.peek(keyword::twox_64_concat) { - Ok(Self::Twox64Concat(input.parse()?)) - } else if lookahead.peek(keyword::identity) { - Ok(Self::Identity(input.parse()?)) - } else if lookahead.peek(keyword::tainted) { - Ok(Self::Blake2_128Concat(input.parse()?)) - } else if lookahead.peek(keyword::natural) { - Ok(Self::Twox64Concat(input.parse()?)) - } else if lookahead.peek(keyword::prehashed) { - Ok(Self::Identity(input.parse()?)) - } else { - Err(lookahead.error()) - } - } -} - -#[derive(Parse, ToTokens, Debug)] -struct DeclStorageDefault { - pub equal_token: Token![=], - pub expr: syn::Expr, -} - -impl syn::parse::Parse for Opt { - fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { - if input.peek(Token![=]) { - input.parse().map(|p| Opt { inner: Some(p) }) - } else { - Ok(Opt { inner: None }) - } - } -} - -#[derive(Clone, Parse, ToTokens, Debug)] -struct SetHasher { - pub hasher_keyword: keyword::hasher, - pub inner: ext::Parens, -} - -impl_parse_for_opt!(SetHasher => keyword::hasher); - -impl From for super::HasherKind { - fn from(set_hasher: SetHasher) -> Self { - set_hasher.inner.content.into() - } -} - -impl From for super::HasherKind { - fn from(hasher: Hasher) -> Self { - match hasher { - Hasher::Blake2_256(_) => super::HasherKind::Blake2_256, - Hasher::Blake2_128(_) => 
super::HasherKind::Blake2_128, - Hasher::Blake2_128Concat(_) => super::HasherKind::Blake2_128Concat, - Hasher::Twox256(_) => super::HasherKind::Twox256, - Hasher::Twox128(_) => super::HasherKind::Twox128, - Hasher::Twox64Concat(_) => super::HasherKind::Twox64Concat, - Hasher::Identity(_) => super::HasherKind::Identity, - } - } -} - -fn get_module_instance( - instance: Option, - instantiable: Option, - default_instance: Option, -) -> syn::Result> { - let right_syntax = "Should be $I: $Instance = $DefaultInstance"; - - if instantiable.as_ref().map_or(false, |i| i != "Instance") { - let msg = format!( - "Instance trait must be named `Instance`, other names are no longer supported, because \ - it is now defined at frame_support::traits::Instance. Expect `Instance` found `{}`", - instantiable.as_ref().unwrap(), - ); - return Err(syn::Error::new(instantiable.span(), msg)) - } - - match (instance, instantiable, default_instance) { - (Some(instance), Some(instantiable), default_instance) => - Ok(Some(super::ModuleInstanceDef { - instance_generic: instance, - instance_trait: instantiable, - instance_default: default_instance, - })), - (None, None, None) => Ok(None), - (Some(instance), None, _) => Err(syn::Error::new( - instance.span(), - format!("Expect instantiable trait bound for instance: {}. {}", instance, right_syntax), - )), - (None, Some(instantiable), _) => Err(syn::Error::new( - instantiable.span(), - format!( - "Expect instance generic for bound instantiable: {}. {}", - instantiable, right_syntax, - ), - )), - (None, _, Some(default_instance)) => Err(syn::Error::new( - default_instance.span(), - format!( - "Expect instance generic for default instance: {}. 
{}", - default_instance, right_syntax, - ), - )), - } -} - -pub fn parse(input: syn::parse::ParseStream) -> syn::Result { - use syn::parse::Parse; - - let def = StorageDefinition::parse(input)?; - - let module_instance = - get_module_instance(def.mod_instance, def.mod_instantiable, def.mod_default_instance)?; - - let mut extra_genesis_config_lines = vec![]; - let mut extra_genesis_build = None; - - for line in def - .extra_genesis - .inner - .into_iter() - .flat_map(|o| o.content.content.lines.inner.into_iter()) - { - match line { - AddExtraGenesisLineEnum::AddExtraGenesisLine(def) => { - extra_genesis_config_lines.push(super::ExtraGenesisLineDef { - attrs: def.attrs.inner, - name: def.extra_field.content, - typ: def.extra_type, - default: def.default_value.inner.map(|o| o.expr), - }); - }, - AddExtraGenesisLineEnum::AddExtraGenesisBuild(def) => { - if extra_genesis_build.is_some() { - return Err(syn::Error::new( - def.span(), - "Only one build expression allowed for extra genesis", - )) - } - - extra_genesis_build = Some(def.expr.content); - }, - } - } - - let storage_lines = parse_storage_line_defs(def.content.content.inner.into_iter())?; - - Ok(super::DeclStorageDef { - generate_storage_info: def.generate_storage_info.inner.is_some(), - hidden_crate: def.hidden_crate.inner.map(|i| i.ident.content), - visibility: def.visibility, - module_name: def.module_ident, - store_trait: def.ident, - module_runtime_generic: def.mod_param_generic, - module_runtime_trait: def.mod_param_bound, - where_clause: def.where_clause, - crate_name: def.crate_ident, - module_instance, - extra_genesis_build, - extra_genesis_config_lines, - storage_lines, - }) -} - -/// Parse the `DeclStorageLine` into `StorageLineDef`. 
-fn parse_storage_line_defs( - defs: impl Iterator, -) -> syn::Result> { - let mut storage_lines = Vec::::new(); - - for line in defs { - let getter = line.getter.inner.map(|o| o.getfn.content.ident); - let config = if let Some(config) = line.config.inner { - if let Some(ident) = config.expr.content { - Some(ident) - } else if let Some(ref ident) = getter { - Some(ident.clone()) - } else { - return Err(syn::Error::new( - config.span(), - "Invalid storage definition, couldn't find config identifier: storage must \ - either have a get identifier `get(fn ident)` or a defined config identifier \ - `config(ident)`", - )) - } - } else { - None - }; - - if let Some(ref config) = config { - storage_lines.iter().filter_map(|sl| sl.config.as_ref()).try_for_each( - |other_config| { - if other_config == config { - Err(syn::Error::new( - config.span(), - "`config()`/`get()` with the same name already defined.", - )) - } else { - Ok(()) - } - }, - )?; - } - - let max_values = match &line.storage_type { - DeclStorageType::Map(_) | DeclStorageType::DoubleMap(_) | DeclStorageType::NMap(_) => - line.max_values.inner.map(|i| i.expr.content), - DeclStorageType::Simple(_) => - if let Some(max_values) = line.max_values.inner { - let msg = "unexpected max_values attribute for storage value."; - let span = max_values.max_values_keyword.span(); - return Err(syn::Error::new(span, msg)) - } else { - Some(syn::parse_quote!(1u32)) - }, - }; - - let span = line.storage_type.span(); - let no_hasher_error = || { - syn::Error::new( - span, - "Default hasher has been removed, use explicit hasher(blake2_128_concat) instead.", - ) - }; - - let storage_type = match line.storage_type { - DeclStorageType::Map(map) => super::StorageLineTypeDef::Map(super::MapDef { - hasher: map.hasher.inner.ok_or_else(no_hasher_error)?.into(), - key: map.key, - value: map.value, - }), - DeclStorageType::DoubleMap(map) => - super::StorageLineTypeDef::DoubleMap(Box::new(super::DoubleMapDef { - hasher1: 
map.hasher1.inner.ok_or_else(no_hasher_error)?.into(), - hasher2: map.hasher2.inner.ok_or_else(no_hasher_error)?.into(), - key1: map.key1, - key2: map.key2, - value: map.value, - })), - DeclStorageType::NMap(map) => super::StorageLineTypeDef::NMap(super::NMapDef { - hashers: map - .storage_keys - .inner - .iter() - .map(|pair| Ok(pair.hasher.inner.clone().ok_or_else(no_hasher_error)?.into())) - .collect::, syn::Error>>()?, - keys: map.storage_keys.inner.iter().map(|pair| pair.key.clone()).collect(), - value: map.value, - }), - DeclStorageType::Simple(expr) => super::StorageLineTypeDef::Simple(expr), - }; - - storage_lines.push(super::StorageLineDef { - attrs: line.attrs.inner, - visibility: line.visibility, - name: line.name, - getter, - config, - max_values, - build: line.build.inner.map(|o| o.expr.content), - default_value: line.default_value.inner.map(|o| o.expr), - storage_type, - }) - } - - Ok(storage_lines) -} diff --git a/frame/support/procedural/src/storage/print_pallet_upgrade.rs b/frame/support/procedural/src/storage/print_pallet_upgrade.rs deleted file mode 100644 index 4e330fd4b85f2..0000000000000 --- a/frame/support/procedural/src/storage/print_pallet_upgrade.rs +++ /dev/null @@ -1,385 +0,0 @@ -use super::StorageLineTypeDef; -use frame_support_procedural_tools::clean_type_string; -use quote::ToTokens; - -/// Environment variable that tells us to print pallet upgrade helper. -const PRINT_PALLET_UPGRADE: &str = "PRINT_PALLET_UPGRADE"; - -fn check_print_pallet_upgrade() -> bool { - std::env::var(PRINT_PALLET_UPGRADE).is_ok() -} - -/// Convert visibilty as now objects are defined in a module. -fn convert_vis(vis: &syn::Visibility) -> &'static str { - match vis { - syn::Visibility::Inherited => "pub(super)", - syn::Visibility::Public(_) => "pub", - _ => "/* TODO_VISIBILITY */", - } -} - -/// fn to convert to token stream then string using display and then call clean_type_string on it. 
-fn to_cleaned_string(t: impl quote::ToTokens) -> String { - clean_type_string(&format!("{}", t.into_token_stream())) -} - -/// Print an incomplete upgrade from decl_storage macro to new pallet attribute. -pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { - if !check_print_pallet_upgrade() { - return - } - - let scrate = "e::quote!(frame_support); - - let config_gen = - if def.optional_instance.is_some() { "" } else { Default::default() }; - - let impl_gen = - if def.optional_instance.is_some() { ", I: 'static>" } else { "" }; - - let decl_gen = if def.optional_instance.is_some() { "" } else { "" }; - - let full_decl_gen = if def.optional_instance.is_some() { - ", I: 'static = ()>" - } else { - "" - }; - - let use_gen = if def.optional_instance.is_some() { "" } else { "" }; - - let use_gen_tuple = if def.optional_instance.is_some() { "<(T, I)>" } else { "" }; - - let mut genesis_config = String::new(); - let mut genesis_build = String::new(); - - let genesis_config_builder_def = super::genesis_config::BuilderDef::from_def(scrate, def); - if !genesis_config_builder_def.blocks.is_empty() { - let genesis_config_def = match super::genesis_config::GenesisConfigDef::from_def(def) { - Ok(g) => g, - Err(err) => { - println!("Could not print upgrade due compile error: {:?}", err); - return - }, - }; - - let genesis_config_impl_gen = - if genesis_config_def.is_generic { impl_gen } else { Default::default() }; - - let genesis_config_use_gen = - if genesis_config_def.is_generic { use_gen } else { Default::default() }; - - let genesis_config_decl_gen = if genesis_config_def.is_generic { - if def.optional_instance.is_some() { - ", I: 'static = ()>" - } else { - "" - } - } else { - Default::default() - }; - - let mut genesis_config_decl_fields = String::new(); - let mut genesis_config_default_fields = String::new(); - for field in &genesis_config_def.fields { - genesis_config_decl_fields.push_str(&format!( - " - {attrs}pub {name}: {typ},", - attrs = 
field.attrs.iter().fold(String::new(), |res, attr| { - format!( - "{}#[{}] - ", - res, - attr.to_token_stream() - ) - }), - name = field.name, - typ = to_cleaned_string(&field.typ), - )); - - genesis_config_default_fields.push_str(&format!( - " - {name}: {default},", - name = field.name, - default = to_cleaned_string(&field.default), - )); - } - - genesis_config = format!( - " - #[pallet::genesis_config] - pub struct GenesisConfig{genesis_config_decl_gen} - // TODO_MAYBE_WHERE_CLAUSE - {{{genesis_config_decl_fields} - }} - - #[cfg(feature = \"std\")] - impl{genesis_config_impl_gen} Default for GenesisConfig{genesis_config_use_gen} - // TODO_MAYBE_WHERE_CLAUSE - {{ - fn default() -> Self {{ - Self {{{genesis_config_default_fields} - }} - }} - }}", - genesis_config_decl_gen = genesis_config_decl_gen, - genesis_config_decl_fields = genesis_config_decl_fields, - genesis_config_impl_gen = genesis_config_impl_gen, - genesis_config_default_fields = genesis_config_default_fields, - genesis_config_use_gen = genesis_config_use_gen, - ); - - let genesis_config_build = - genesis_config_builder_def.blocks.iter().fold(String::new(), |res, block| { - format!( - "{} - {}", - res, - to_cleaned_string(block), - ) - }); - - genesis_build = format!( - " - #[pallet::genesis_build] - impl{impl_gen} GenesisBuild{use_gen} for GenesisConfig{genesis_config_use_gen} - // TODO_MAYBE_WHERE_CLAUSE - {{ - fn build(&self) {{{genesis_config_build} - }} - }}", - impl_gen = impl_gen, - use_gen = use_gen, - genesis_config_use_gen = genesis_config_use_gen, - genesis_config_build = genesis_config_build, - ); - } - - let mut storages = String::new(); - for line in &def.storage_lines { - let storage_vis = convert_vis(&line.visibility); - - let getter = if let Some(getter) = &line.getter { - format!( - " - #[pallet::getter(fn {getter})]", - getter = getter - ) - } else { - Default::default() - }; - - let value_type = &line.value_type; - - let default_value_type_value = line - .default_value - .as_ref() - 
.map(|default_expr| { - format!( - " - #[pallet::type_value] - {storage_vis} fn DefaultFor{name} /* TODO_MAYBE_GENERICS */ () -> {value_type} {{ - {default_expr} - }} -", - name = line.name, - storage_vis = storage_vis, - value_type = to_cleaned_string(&line.value_type), - default_expr = to_cleaned_string(&default_expr), - ) - }) - .unwrap_or_else(String::new); - - let comma_query_kind = if line.is_option { - if line.default_value.is_some() { - ", OptionQuery" - } else { - Default::default() - } - } else { - ", ValueQuery" - }; - - let comma_default_value_getter_name = line - .default_value - .as_ref() - .map(|_| format!(", DefaultFor{}", line.name)) - .unwrap_or_else(String::new); - - let typ = match &line.storage_type { - StorageLineTypeDef::Map(map) => { - format!( - "StorageMap<_, {hasher}, {key}, {value_type}{comma_query_kind}\ - {comma_default_value_getter_name}>", - hasher = &map.hasher.to_storage_hasher_struct(), - key = to_cleaned_string(&map.key), - value_type = to_cleaned_string(&value_type), - comma_query_kind = comma_query_kind, - comma_default_value_getter_name = comma_default_value_getter_name, - ) - }, - StorageLineTypeDef::DoubleMap(double_map) => { - format!( - "StorageDoubleMap<_, {hasher1}, {key1}, {hasher2}, {key2}, {value_type}\ - {comma_query_kind}{comma_default_value_getter_name}>", - hasher1 = double_map.hasher1.to_storage_hasher_struct(), - key1 = to_cleaned_string(&double_map.key1), - hasher2 = double_map.hasher2.to_storage_hasher_struct(), - key2 = to_cleaned_string(&double_map.key2), - value_type = to_cleaned_string(&value_type), - comma_query_kind = comma_query_kind, - comma_default_value_getter_name = comma_default_value_getter_name, - ) - }, - StorageLineTypeDef::NMap(map) => { - format!( - "StorageNMap<_, {keygen}, {value_type}{comma_query_kind}\ - {comma_default_value_getter_name}>", - keygen = map.to_keygen_struct(&def.hidden_crate), - value_type = to_cleaned_string(&value_type), - comma_query_kind = comma_query_kind, - 
comma_default_value_getter_name = comma_default_value_getter_name, - ) - }, - StorageLineTypeDef::Simple(_) => { - format!( - "StorageValue<_, {value_type}{comma_query_kind}\ - {comma_default_value_getter_name}>", - value_type = to_cleaned_string(&value_type), - comma_query_kind = comma_query_kind, - comma_default_value_getter_name = comma_default_value_getter_name, - ) - }, - }; - - let additional_comment = if line.is_option && line.default_value.is_some() { - " // TODO: This type of storage is no longer supported: `OptionQuery` cannot be used \ - alongside a not-none value on empty storage. Please use `ValueQuery` instead." - } else { - "" - }; - - storages.push_str(&format!( - " -{default_value_type_value}{doc} - #[pallet::storage]{getter} - {storage_vis} type {name}{full_decl_gen} = {typ};{additional_comment}", - default_value_type_value = default_value_type_value, - getter = getter, - storage_vis = storage_vis, - name = line.name, - full_decl_gen = full_decl_gen, - typ = typ, - additional_comment = additional_comment, - doc = line.doc_attrs.iter().fold(String::new(), |mut res, attr| { - if let syn::Meta::NameValue(name_value) = attr { - if name_value.path.is_ident("doc") { - res = format!( - "{} - ///{}", - res, - name_value.value.to_token_stream(), - ); - } - } - res - }), - )); - } - - let deprecated_instance_stuff = if def.optional_instance.is_some() { - " - /// Old name for default instance generated by decl_storage. - #[deprecated(note=\"use `()` instead\")] - pub type DefaultInstance = (); - - /// Old name for instance trait used by old macros. 
- #[deprecated(note=\"use `'static` instead\")] - pub trait Instance: 'static {} - impl Instance for I {}" - } else { - "" - }; - - println!( - " -// Template for pallet upgrade for {pallet_name} - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet {{ - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - use super::*; - - #[pallet::config] - pub trait Config{config_gen}: frame_system::Config - // TODO_MAYBE_ADDITIONAL_BOUNDS_AND_WHERE_CLAUSE - {{ - // TODO_ASSOCIATED_TYPE_AND_CONSTANTS - }} - - {deprecated_instance_stuff} - - #[pallet::pallet] - #[pallet::generate_store({store_vis} trait Store)] - pub struct Pallet{decl_gen}(PhantomData{use_gen_tuple}); - - #[pallet::hooks] - impl{impl_gen} Hooks> for Pallet{use_gen} - // TODO_MAYBE_WHERE_CLAUSE - {{ - // TODO_ON_FINALIZE - // TODO_ON_INITIALIZE - // TODO_ON_RUNTIME_UPGRADE - // TODO_INTEGRITY_TEST - // TODO_OFFCHAIN_WORKER - }} - - #[pallet::call] - impl{impl_gen} Pallet{use_gen} - // TODO_MAYBE_WHERE_CLAUSE - {{ - // TODO_UPGRADE_DISPATCHABLES - }} - - #[pallet::inherent] - // TODO_INHERENT - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - // TODO_EVENT - - // TODO_REMOVE_IF_NO_EVENT - /// Old name generated by `decl_event`. 
- #[deprecated(note=\"use `Event` instead\")] - pub type RawEvent /* TODO_PUT_EVENT_GENERICS */ = Event /* TODO_PUT_EVENT_GENERICS */; - - #[pallet::error] - // TODO_ERROR - - #[pallet::origin] - // TODO_ORIGIN - - #[pallet::validate_unsigned] - // TODO_VALIDATE_UNSIGNED - - {storages} - - {genesis_config} - - {genesis_build} -}}", - config_gen = config_gen, - store_vis = convert_vis(&def.visibility), - impl_gen = impl_gen, - use_gen = use_gen, - use_gen_tuple = use_gen_tuple, - decl_gen = decl_gen, - storages = storages, - genesis_config = genesis_config, - genesis_build = genesis_build, - pallet_name = def.crate_name, - deprecated_instance_stuff = deprecated_instance_stuff, - ); -} diff --git a/frame/support/procedural/src/storage/storage_info.rs b/frame/support/procedural/src/storage/storage_info.rs deleted file mode 100644 index 3e851b04231ac..0000000000000 --- a/frame/support/procedural/src/storage/storage_info.rs +++ /dev/null @@ -1,59 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of trait `StorageInfoTrait` on module structure. 
- -use super::DeclStorageDefExt; -use proc_macro2::TokenStream; -use quote::quote; - -pub fn impl_storage_info(def: &DeclStorageDefExt) -> TokenStream { - let scrate = &def.hidden_crate; - - let mut res_append_storage = TokenStream::new(); - - for line in def.storage_lines.iter() { - let storage_struct = &line.storage_struct; - - let (trait_, method) = if def.generate_storage_info { - (quote!(#scrate::traits::StorageInfoTrait), quote!(storage_info)) - } else { - (quote!(#scrate::traits::PartialStorageInfoTrait), quote!(partial_storage_info)) - }; - - res_append_storage.extend(quote!( - let mut storage_info = < - #storage_struct as #trait_ - >::#method(); - res.append(&mut storage_info); - )); - } - - let module_struct = &def.module_struct; - let module_impl = &def.module_impl; - let where_clause = &def.where_clause; - - quote!( - impl #module_impl #scrate::traits::StorageInfoTrait for #module_struct #where_clause { - fn storage_info() -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> { - let mut res = #scrate::sp_std::vec![]; - #res_append_storage - res - } - } - ) -} diff --git a/frame/support/procedural/src/storage/storage_struct.rs b/frame/support/procedural/src/storage/storage_struct.rs deleted file mode 100644 index 373e722122235..0000000000000 --- a/frame/support/procedural/src/storage/storage_struct.rs +++ /dev/null @@ -1,564 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of storage structures and implementation of storage traits on them. - -use super::{instance_trait::INHERENT_INSTANCE_NAME, DeclStorageDefExt, StorageLineTypeDef}; -use proc_macro2::{Ident, Span, TokenStream}; -use quote::quote; - -fn from_optional_value_to_query(is_option: bool, default: &Option) -> TokenStream { - let default = default - .as_ref() - .map(|d| quote!( #d )) - .unwrap_or_else(|| quote!(Default::default())); - - if !is_option { - // raw type case - quote!( v.unwrap_or_else(|| #default ) ) - } else { - // Option<> type case - quote!( v.or_else(|| #default ) ) - } -} - -fn from_query_to_optional_value(is_option: bool) -> TokenStream { - if !is_option { - // raw type case - quote!(Some(v)) - } else { - // Option<> type case - quote!(v) - } -} - -pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { - let scrate = &def.hidden_crate; - let mut impls = TokenStream::new(); - - for line in &def.storage_lines { - // Propagate doc attributes. 
- let attrs = &line.doc_attrs; - - let visibility = &line.visibility; - let optional_storage_runtime_comma = &line.optional_storage_runtime_comma; - let optional_storage_runtime_bound_comma = &line.optional_storage_runtime_bound_comma; - let optional_storage_where_clause = &line.optional_storage_where_clause; - let optional_instance_bound_optional_default = - &def.optional_instance_bound_optional_default; - let optional_instance_bound = &def.optional_instance_bound; - let optional_instance = &def.optional_instance; - let name = &line.name; - - let struct_decl = quote!( - #( #[ #attrs ] )* - #visibility struct #name< - #optional_storage_runtime_bound_comma #optional_instance_bound_optional_default - >( - #scrate::sp_std::marker::PhantomData< - (#optional_storage_runtime_comma #optional_instance) - > - ) #optional_storage_where_clause; - ); - - let from_query_to_optional_value = from_query_to_optional_value(line.is_option); - let from_optional_value_to_query = - from_optional_value_to_query(line.is_option, &line.default_value); - - // Contains accessor to instance, used to get prefixes - let instance_or_inherent = if let Some(instance) = def.module_instance.as_ref() { - instance.instance_generic.clone() - } else { - Ident::new(INHERENT_INSTANCE_NAME, Span::call_site()) - }; - - let storage_name_bstr = - syn::LitByteStr::new(line.name.to_string().as_ref(), line.name.span()); - - let storage_generator_trait = &line.storage_generator_trait; - let storage_struct = &line.storage_struct; - let impl_trait = quote!( #optional_storage_runtime_bound_comma #optional_instance_bound ); - let value_type = &line.value_type; - let query_type = &line.query_type; - - let struct_impl = match &line.storage_type { - StorageLineTypeDef::Simple(_) => { - quote!( - impl<#impl_trait> #scrate::#storage_generator_trait for #storage_struct - #optional_storage_where_clause - { - type Query = #query_type; - - fn module_prefix() -> &'static [u8] { - <#instance_or_inherent as 
#scrate::traits::Instance>::PREFIX.as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - #storage_name_bstr - } - - fn from_optional_value_to_query(v: Option<#value_type>) -> Self::Query { - #from_optional_value_to_query - } - - fn from_query_to_optional_value(v: Self::Query) -> Option<#value_type> { - #from_query_to_optional_value - } - } - ) - }, - StorageLineTypeDef::Map(map) => { - let hasher = map.hasher.to_storage_hasher_struct(); - quote!( - impl<#impl_trait> #scrate::storage::StoragePrefixedMap<#value_type> - for #storage_struct #optional_storage_where_clause - { - fn module_prefix() -> &'static [u8] { - <#instance_or_inherent as #scrate::traits::Instance>::PREFIX.as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - #storage_name_bstr - } - } - - impl<#impl_trait> #scrate::#storage_generator_trait for #storage_struct - #optional_storage_where_clause - { - type Query = #query_type; - type Hasher = #scrate::#hasher; - - fn module_prefix() -> &'static [u8] { - <#instance_or_inherent as #scrate::traits::Instance>::PREFIX.as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - #storage_name_bstr - } - - fn from_optional_value_to_query(v: Option<#value_type>) -> Self::Query { - #from_optional_value_to_query - } - - fn from_query_to_optional_value(v: Self::Query) -> Option<#value_type> { - #from_query_to_optional_value - } - } - ) - }, - StorageLineTypeDef::DoubleMap(map) => { - let hasher1 = map.hasher1.to_storage_hasher_struct(); - let hasher2 = map.hasher2.to_storage_hasher_struct(); - quote!( - impl<#impl_trait> #scrate::storage::StoragePrefixedMap<#value_type> - for #storage_struct #optional_storage_where_clause - { - fn module_prefix() -> &'static [u8] { - <#instance_or_inherent as #scrate::traits::Instance>::PREFIX.as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - #storage_name_bstr - } - } - - impl<#impl_trait> #scrate::#storage_generator_trait for #storage_struct - #optional_storage_where_clause - { - type Query = #query_type; - 
- type Hasher1 = #scrate::#hasher1; - - type Hasher2 = #scrate::#hasher2; - - fn module_prefix() -> &'static [u8] { - <#instance_or_inherent as #scrate::traits::Instance>::PREFIX.as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - #storage_name_bstr - } - - fn from_optional_value_to_query(v: Option<#value_type>) -> Self::Query { - #from_optional_value_to_query - } - - fn from_query_to_optional_value(v: Self::Query) -> Option<#value_type> { - #from_query_to_optional_value - } - } - ) - }, - StorageLineTypeDef::NMap(_) => { - quote!( - impl<#impl_trait> #scrate::storage::StoragePrefixedMap<#value_type> - for #storage_struct #optional_storage_where_clause - { - fn module_prefix() -> &'static [u8] { - <#instance_or_inherent as #scrate::traits::Instance>::PREFIX.as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - #storage_name_bstr - } - } - - impl<#impl_trait> #scrate::#storage_generator_trait for #storage_struct - #optional_storage_where_clause - { - type Query = #query_type; - - fn module_prefix() -> &'static [u8] { - <#instance_or_inherent as #scrate::traits::Instance>::PREFIX.as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - #storage_name_bstr - } - - fn from_optional_value_to_query(v: Option<#value_type>) -> Self::Query { - #from_optional_value_to_query - } - - fn from_query_to_optional_value(v: Self::Query) -> Option<#value_type> { - #from_query_to_optional_value - } - } - ) - }, - }; - - let max_values = if let Some(max_values) = &line.max_values { - quote::quote!({ - let max_values: u32 = (|| #max_values)(); - Some(max_values) - }) - } else { - quote::quote!(None) - }; - - let storage_info_impl = if def.generate_storage_info { - match &line.storage_type { - StorageLineTypeDef::Simple(_) => { - quote!( - impl<#impl_trait> #scrate::traits::StorageInfoTrait for #storage_struct - #optional_storage_where_clause - { - fn storage_info() - -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> - { - use 
#scrate::sp_runtime::SaturatedConversion; - - let max_size = < - #value_type as #scrate::codec::MaxEncodedLen - >::max_encoded_len() - .saturated_into(); - - #scrate::sp_std::vec![ - #scrate::traits::StorageInfo { - pallet_name: < - #storage_struct as #scrate::#storage_generator_trait - >::module_prefix().to_vec(), - storage_name: < - #storage_struct as #scrate::#storage_generator_trait - >::storage_prefix().to_vec(), - prefix: < - #storage_struct as #scrate::#storage_generator_trait - >::storage_value_final_key().to_vec(), - max_values: Some(1), - max_size: Some(max_size), - } - ] - } - } - ) - }, - StorageLineTypeDef::Map(map) => { - let key = &map.key; - quote!( - impl<#impl_trait> #scrate::traits::StorageInfoTrait for #storage_struct - #optional_storage_where_clause - { - fn storage_info() - -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> - { - use #scrate::sp_runtime::SaturatedConversion; - use #scrate::StorageHasher; - - let key_max_size = < - Self as #scrate::storage::generator::StorageMap<_, _> - >::Hasher::max_len::<#key>(); - - let max_size = < - #value_type as #scrate::codec::MaxEncodedLen - >::max_encoded_len() - .saturating_add(key_max_size) - .saturated_into(); - - #scrate::sp_std::vec![ - #scrate::traits::StorageInfo { - pallet_name: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::module_prefix().to_vec(), - storage_name: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::storage_prefix().to_vec(), - prefix: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::final_prefix().to_vec(), - max_values: #max_values, - max_size: Some(max_size), - } - ] - } - } - ) - }, - StorageLineTypeDef::DoubleMap(map) => { - let key1 = &map.key1; - let key2 = &map.key2; - quote!( - impl<#impl_trait> #scrate::traits::StorageInfoTrait for #storage_struct - #optional_storage_where_clause - { - fn storage_info() - -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> - { 
- use #scrate::sp_runtime::SaturatedConversion; - use #scrate::StorageHasher; - - let key1_max_size = < - Self as #scrate::storage::generator::StorageDoubleMap<_, _, _> - >::Hasher1::max_len::<#key1>(); - - let key2_max_size = < - Self as #scrate::storage::generator::StorageDoubleMap<_, _, _> - >::Hasher2::max_len::<#key2>(); - - let max_size = < - #value_type as #scrate::codec::MaxEncodedLen - >::max_encoded_len() - .saturating_add(key1_max_size) - .saturating_add(key2_max_size) - .saturated_into(); - - #scrate::sp_std::vec![ - #scrate::traits::StorageInfo { - pallet_name: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::module_prefix().to_vec(), - storage_name: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::storage_prefix().to_vec(), - prefix: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::final_prefix().to_vec(), - max_values: #max_values, - max_size: Some(max_size), - } - ] - } - } - ) - }, - StorageLineTypeDef::NMap(map) => { - let key = &map.to_keygen_struct(scrate); - quote!( - impl<#impl_trait> #scrate::traits::StorageInfoTrait for #storage_struct - #optional_storage_where_clause - { - fn storage_info() - -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> - { - use #scrate::sp_runtime::SaturatedConversion; - - let key_max_size = < - #key as #scrate::storage::types::KeyGeneratorMaxEncodedLen - >::key_max_encoded_len(); - - let max_size = < - #value_type as #scrate::codec::MaxEncodedLen - >::max_encoded_len() - .saturating_add(key_max_size) - .saturated_into(); - - #scrate::sp_std::vec![ - #scrate::traits::StorageInfo { - pallet_name: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::module_prefix().to_vec(), - storage_name: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::storage_prefix().to_vec(), - prefix: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - 
>::final_prefix().to_vec(), - max_values: #max_values, - max_size: Some(max_size), - } - ] - } - } - ) - }, - } - } else { - // Implement `__partial_storage_info` which doesn't require MaxEncodedLen on keys and - // values. - match &line.storage_type { - StorageLineTypeDef::Simple(_) => { - quote!( - impl<#impl_trait> #scrate::traits::PartialStorageInfoTrait - for #storage_struct - #optional_storage_where_clause - { - fn partial_storage_info() - -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> - { - #scrate::sp_std::vec![ - #scrate::traits::StorageInfo { - pallet_name: < - #storage_struct as #scrate::#storage_generator_trait - >::module_prefix().to_vec(), - storage_name: < - #storage_struct as #scrate::#storage_generator_trait - >::storage_prefix().to_vec(), - prefix: < - #storage_struct as #scrate::#storage_generator_trait - >::storage_value_final_key().to_vec(), - max_values: Some(1), - max_size: None, - } - ] - } - } - ) - }, - StorageLineTypeDef::Map(_) => { - quote!( - impl<#impl_trait> #scrate::traits::PartialStorageInfoTrait - for #storage_struct - #optional_storage_where_clause - { - fn partial_storage_info() - -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> - { - #scrate::sp_std::vec![ - #scrate::traits::StorageInfo { - pallet_name: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::module_prefix().to_vec(), - storage_name: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::storage_prefix().to_vec(), - prefix: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::final_prefix().to_vec(), - max_values: #max_values, - max_size: None, - } - ] - } - } - ) - }, - StorageLineTypeDef::DoubleMap(_) => { - quote!( - impl<#impl_trait> #scrate::traits::PartialStorageInfoTrait - for #storage_struct - #optional_storage_where_clause - { - fn partial_storage_info() - -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> - { - #scrate::sp_std::vec![ - 
#scrate::traits::StorageInfo { - pallet_name: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::module_prefix().to_vec(), - storage_name: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::storage_prefix().to_vec(), - prefix: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::final_prefix().to_vec(), - max_values: #max_values, - max_size: None, - } - ] - } - } - ) - }, - StorageLineTypeDef::NMap(_) => { - quote!( - impl<#impl_trait> #scrate::traits::PartialStorageInfoTrait - for #storage_struct - #optional_storage_where_clause - { - fn partial_storage_info() - -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> - { - #scrate::sp_std::vec![ - #scrate::traits::StorageInfo { - pallet_name: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::module_prefix().to_vec(), - storage_name: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::storage_prefix().to_vec(), - prefix: < - #storage_struct - as #scrate::storage::StoragePrefixedMap<#value_type> - >::final_prefix().to_vec(), - max_values: #max_values, - max_size: None, - } - ] - } - } - ) - }, - } - }; - - impls.extend(quote!( - #struct_decl - #struct_impl - #storage_info_impl - )) - } - - impls -} diff --git a/frame/support/procedural/src/storage/store_trait.rs b/frame/support/procedural/src/storage/store_trait.rs deleted file mode 100644 index 5dca502518a33..0000000000000 --- a/frame/support/procedural/src/storage/store_trait.rs +++ /dev/null @@ -1,55 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Declaration of store trait and implementation on module structure. - -use super::DeclStorageDefExt; -use proc_macro2::TokenStream; -use quote::quote; - -pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { - let decl_store_items = def.storage_lines.iter().map(|sline| &sline.name).fold( - TokenStream::new(), - |mut items, name| { - items.extend(quote!(type #name;)); - items - }, - ); - - let impl_store_items = def.storage_lines.iter().fold(TokenStream::new(), |mut items, line| { - let name = &line.name; - let storage_struct = &line.storage_struct; - - items.extend(quote!(type #name = #storage_struct;)); - items - }); - - let visibility = &def.visibility; - let store_trait = &def.store_trait; - let module_struct = &def.module_struct; - let module_impl = &def.module_impl; - let where_clause = &def.where_clause; - - quote!( - #visibility trait #store_trait { - #decl_store_items - } - impl #module_impl #store_trait for #module_struct #where_clause { - #impl_store_items - } - ) -} diff --git a/frame/support/procedural/src/storage_alias.rs b/frame/support/procedural/src/storage_alias.rs index b44a7ee997fe2..d1d1aba47998e 100644 --- a/frame/support/procedural/src/storage_alias.rs +++ b/frame/support/procedural/src/storage_alias.rs @@ -22,78 +22,48 @@ use frame_support_procedural_tools::generate_crate_access_2018; use proc_macro2::{Span, TokenStream}; use quote::{quote, ToTokens}; use syn::{ - ext::IdentExt, parenthesized, parse::{Parse, ParseStream}, punctuated::Punctuated, - token, Attribute, Error, Ident, Result, Token, Type, TypeParam, 
Visibility, WhereClause, + spanned::Spanned, + token, + visit::Visit, + Attribute, Error, Ident, Result, Token, Type, TypeParam, Visibility, WhereClause, }; -/// Represents a path that only consists of [`Ident`] separated by `::`. -struct SimplePath { - leading_colon: Option, - segments: Punctuated, +/// Extension trait for [`Type`]. +trait TypeExt { + fn get_ident(&self) -> Option<&Ident>; + fn contains_ident(&self, ident: &Ident) -> bool; } -impl SimplePath { - /// Returns the [`Ident`] of this path. - /// - /// It only returns `Some(_)` if there is exactly one element and no leading colon. +impl TypeExt for Type { fn get_ident(&self) -> Option<&Ident> { - if self.segments.len() != 1 || self.leading_colon.is_some() { - None - } else { - self.segments.first() + match self { + Type::Path(p) => match &p.qself { + Some(qself) => qself.ty.get_ident(), + None => p.path.get_ident(), + }, + _ => None, } } -} - -impl Parse for SimplePath { - fn parse(input: ParseStream<'_>) -> Result { - Ok(Self { - leading_colon: if input.peek(Token![::]) { Some(input.parse()?) } else { None }, - segments: Punctuated::parse_separated_nonempty_with(input, |p| Ident::parse_any(p))?, - }) - } -} - -impl ToTokens for SimplePath { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.leading_colon.to_tokens(tokens); - self.segments.to_tokens(tokens); - } -} - -/// Represents generics which only support [`Ident`] separated by commas as you would pass it to a -/// type. -struct TypeGenerics { - lt_token: Token![<], - params: Punctuated, - gt_token: Token![>], -} -impl TypeGenerics { - /// Returns the generics for types declarations etc. 
- fn iter(&self) -> impl Iterator { - self.params.iter() - } -} + fn contains_ident(&self, ident: &Ident) -> bool { + struct ContainsIdent<'a> { + ident: &'a Ident, + found: bool, + } + impl<'a, 'ast> Visit<'ast> for ContainsIdent<'a> { + fn visit_ident(&mut self, i: &'ast Ident) { + if i == self.ident { + self.found = true; + } + } + } -impl Parse for TypeGenerics { - fn parse(input: ParseStream<'_>) -> Result { - Ok(Self { - lt_token: input.parse()?, - params: Punctuated::parse_separated_nonempty(input)?, - gt_token: input.parse()?, - }) - } -} - -impl ToTokens for TypeGenerics { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.lt_token.to_tokens(tokens); - self.params.to_tokens(tokens); - self.gt_token.to_tokens(tokens); + let mut visitor = ContainsIdent { ident, found: false }; + syn::visit::visit_type(&mut visitor, self); + visitor.found } } @@ -142,13 +112,22 @@ mod storage_types { syn::custom_keyword!(StorageNMap); } +/// The types of prefixes the storage alias macro supports. +mod prefix_types { + // Use the verbatim/unmodified input name as the prefix. + syn::custom_keyword!(verbatim); + // The input type is a pallet and its pallet name should be used as the prefix. + syn::custom_keyword!(pallet_name); + // The input type implements `Get<'static str>` and this `str` should be used as the prefix. 
+ syn::custom_keyword!(dynamic); +} + /// The supported storage types enum StorageType { Value { _kw: storage_types::StorageValue, _lt_token: Token![<], - prefix: SimplePath, - prefix_generics: Option, + prefix: Type, _value_comma: Token![,], value_ty: Type, query_type: Option<(Token![,], Type)>, @@ -158,8 +137,7 @@ enum StorageType { Map { _kw: storage_types::StorageMap, _lt_token: Token![<], - prefix: SimplePath, - prefix_generics: Option, + prefix: Type, _hasher_comma: Token![,], hasher_ty: Type, _key_comma: Token![,], @@ -173,8 +151,7 @@ enum StorageType { CountedMap { _kw: storage_types::CountedStorageMap, _lt_token: Token![<], - prefix: SimplePath, - prefix_generics: Option, + prefix: Type, _hasher_comma: Token![,], hasher_ty: Type, _key_comma: Token![,], @@ -188,8 +165,7 @@ enum StorageType { DoubleMap { _kw: storage_types::StorageDoubleMap, _lt_token: Token![<], - prefix: SimplePath, - prefix_generics: Option, + prefix: Type, _hasher1_comma: Token![,], hasher1_ty: Type, _key1_comma: Token![,], @@ -207,8 +183,7 @@ enum StorageType { NMap { _kw: storage_types::StorageNMap, _lt_token: Token![<], - prefix: SimplePath, - prefix_generics: Option, + prefix: Type, _paren_comma: Token![,], _paren_token: token::Paren, key_types: Punctuated, @@ -231,6 +206,7 @@ impl StorageType { visibility: &Visibility, attributes: &[Attribute], ) -> TokenStream { + let storage_instance_generics = &storage_instance.generics; let storage_instance = &storage_instance.name; let attributes = attributes.iter(); let storage_generics = storage_generics.map(|g| { @@ -240,22 +216,20 @@ impl StorageType { }); match self { - Self::Value { value_ty, query_type, prefix_generics, .. } => { + Self::Value { value_ty, query_type, .. } => { let query_type = query_type.as_ref().map(|(c, t)| quote!(#c #t)); quote! 
{ #( #attributes )* #visibility type #storage_name #storage_generics = #crate_::storage::types::StorageValue< - #storage_instance #prefix_generics, + #storage_instance #storage_instance_generics, #value_ty #query_type >; } }, - Self::CountedMap { - value_ty, query_type, hasher_ty, key_ty, prefix_generics, .. - } | - Self::Map { value_ty, query_type, hasher_ty, key_ty, prefix_generics, .. } => { + Self::CountedMap { value_ty, query_type, hasher_ty, key_ty, .. } | + Self::Map { value_ty, query_type, hasher_ty, key_ty, .. } => { let query_type = query_type.as_ref().map(|(c, t)| quote!(#c #t)); let map_type = Ident::new( match self { @@ -268,7 +242,7 @@ impl StorageType { quote! { #( #attributes )* #visibility type #storage_name #storage_generics = #crate_::storage::types::#map_type< - #storage_instance #prefix_generics, + #storage_instance #storage_instance_generics, #hasher_ty, #key_ty, #value_ty @@ -283,7 +257,6 @@ impl StorageType { key1_ty, hasher2_ty, key2_ty, - prefix_generics, .. } => { let query_type = query_type.as_ref().map(|(c, t)| quote!(#c #t)); @@ -291,7 +264,7 @@ impl StorageType { quote! { #( #attributes )* #visibility type #storage_name #storage_generics = #crate_::storage::types::StorageDoubleMap< - #storage_instance #prefix_generics, + #storage_instance #storage_instance_generics, #hasher1_ty, #key1_ty, #hasher2_ty, @@ -301,14 +274,14 @@ impl StorageType { >; } }, - Self::NMap { value_ty, query_type, key_types, prefix_generics, .. } => { + Self::NMap { value_ty, query_type, key_types, .. } => { let query_type = query_type.as_ref().map(|(c, t)| quote!(#c #t)); let key_types = key_types.iter(); quote! { #( #attributes )* #visibility type #storage_name #storage_generics = #crate_::storage::types::StorageNMap< - #storage_instance #prefix_generics, + #storage_instance #storage_instance_generics, ( #( #key_types ),* ), #value_ty #query_type @@ -319,7 +292,7 @@ impl StorageType { } /// The prefix for this storage type. 
- fn prefix(&self) -> &SimplePath { + fn prefix(&self) -> &Type { match self { Self::Value { prefix, .. } | Self::Map { prefix, .. } | @@ -328,17 +301,6 @@ impl StorageType { Self::DoubleMap { prefix, .. } => prefix, } } - - /// The prefix generics for this storage type. - fn prefix_generics(&self) -> Option<&TypeGenerics> { - match self { - Self::Value { prefix_generics, .. } | - Self::Map { prefix_generics, .. } | - Self::CountedMap { prefix_generics, .. } | - Self::NMap { prefix_generics, .. } | - Self::DoubleMap { prefix_generics, .. } => prefix_generics.as_ref(), - } - } } impl Parse for StorageType { @@ -353,23 +315,11 @@ impl Parse for StorageType { } }; - let parse_pallet_generics = |input: ParseStream<'_>| -> Result> { - let lookahead = input.lookahead1(); - if lookahead.peek(Token![<]) { - Ok(Some(input.parse()?)) - } else if lookahead.peek(Token![,]) { - Ok(None) - } else { - Err(lookahead.error()) - } - }; - if lookahead.peek(storage_types::StorageValue) { Ok(Self::Value { _kw: input.parse()?, _lt_token: input.parse()?, prefix: input.parse()?, - prefix_generics: parse_pallet_generics(input)?, _value_comma: input.parse()?, value_ty: input.parse()?, query_type: parse_query_type(input)?, @@ -381,7 +331,6 @@ impl Parse for StorageType { _kw: input.parse()?, _lt_token: input.parse()?, prefix: input.parse()?, - prefix_generics: parse_pallet_generics(input)?, _hasher_comma: input.parse()?, hasher_ty: input.parse()?, _key_comma: input.parse()?, @@ -397,7 +346,6 @@ impl Parse for StorageType { _kw: input.parse()?, _lt_token: input.parse()?, prefix: input.parse()?, - prefix_generics: parse_pallet_generics(input)?, _hasher_comma: input.parse()?, hasher_ty: input.parse()?, _key_comma: input.parse()?, @@ -413,7 +361,6 @@ impl Parse for StorageType { _kw: input.parse()?, _lt_token: input.parse()?, prefix: input.parse()?, - prefix_generics: parse_pallet_generics(input)?, _hasher1_comma: input.parse()?, hasher1_ty: input.parse()?, _key1_comma: input.parse()?, @@ -434,7 
+381,6 @@ impl Parse for StorageType { _kw: input.parse()?, _lt_token: input.parse()?, prefix: input.parse()?, - prefix_generics: parse_pallet_generics(input)?, _paren_comma: input.parse()?, _paren_token: parenthesized!(content in input), key_types: Punctuated::parse_terminated(&content)?, @@ -508,20 +454,50 @@ impl Parse for Input { } } +/// Defines which type of prefix the storage alias is using. +#[derive(Clone, Copy)] +enum PrefixType { + /// An appropriate prefix will be determined automatically. + /// + /// If generics are passed, this is assumed to be a pallet and the pallet name should be used. + /// Otherwise use the verbatim passed name as prefix. + Compatibility, + /// The provided ident/name will be used as the prefix. + Verbatim, + /// The provided type will be used to determine the prefix. This type must + /// implement `PalletInfoAccess` which specifies the proper name. This + /// name is then used as the prefix. + PalletName, + /// Uses the provided type implementing `Get<'static str>` to determine the prefix. + Dynamic, +} + /// Implementation of the `storage_alias` attribute macro. 
-pub fn storage_alias(input: TokenStream) -> Result { +pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> Result { let input = syn::parse2::(input)?; let crate_ = generate_crate_access_2018("frame-support")?; + let prefix_type = if attributes.is_empty() { + PrefixType::Compatibility + } else if syn::parse2::(attributes.clone()).is_ok() { + PrefixType::Verbatim + } else if syn::parse2::(attributes.clone()).is_ok() { + PrefixType::PalletName + } else if syn::parse2::(attributes.clone()).is_ok() { + PrefixType::Dynamic + } else { + return Err(Error::new(attributes.span(), "Unknown attributes")) + }; + let storage_instance = generate_storage_instance( &crate_, &input.storage_name, input.storage_generics.as_ref(), input.where_clause.as_ref(), input.storage_type.prefix(), - input.storage_type.prefix_generics(), &input.visibility, matches!(input.storage_type, StorageType::CountedMap { .. }), + prefix_type, )?; let definition = input.storage_type.generate_type_declaration( @@ -545,6 +521,7 @@ pub fn storage_alias(input: TokenStream) -> Result { /// The storage instance to use for the storage alias. 
struct StorageInstance { name: Ident, + generics: TokenStream, code: TokenStream, } @@ -554,42 +531,84 @@ fn generate_storage_instance( storage_name: &Ident, storage_generics: Option<&SimpleGenerics>, storage_where_clause: Option<&WhereClause>, - prefix: &SimplePath, - prefix_generics: Option<&TypeGenerics>, + prefix: &Type, visibility: &Visibility, is_counted_map: bool, + prefix_type: PrefixType, ) -> Result { - if let Some(ident) = prefix.get_ident().filter(|i| *i == "_") { - return Err(Error::new(ident.span(), "`_` is not allowed as prefix by `storage_alias`.")) + if let Type::Infer(_) = prefix { + return Err(Error::new(prefix.span(), "`_` is not allowed as prefix by `storage_alias`.")) } - let (pallet_prefix, impl_generics, type_generics) = - if let Some((prefix_generics, storage_generics)) = - prefix_generics.and_then(|p| storage_generics.map(|s| (p, s))) - { - let type_generics = prefix_generics.iter(); - let type_generics2 = prefix_generics.iter(); - let impl_generics = storage_generics - .impl_generics() - .filter(|g| prefix_generics.params.iter().any(|pg| *pg == g.ident)); + let impl_generics_used_by_prefix = storage_generics + .as_ref() + .map(|g| { + g.impl_generics() + .filter(|g| prefix.contains_ident(&g.ident)) + .collect::>() + }) + .unwrap_or_default(); + + let (pallet_prefix, impl_generics, type_generics) = match prefix_type { + PrefixType::Compatibility => + if !impl_generics_used_by_prefix.is_empty() { + let type_generics = impl_generics_used_by_prefix.iter().map(|g| &g.ident); + let impl_generics = impl_generics_used_by_prefix.iter(); + + ( + quote! 
{ + < #prefix as #crate_::traits::PalletInfoAccess>::name() + }, + quote!( #( #impl_generics ),* ), + quote!( #( #type_generics ),* ), + ) + } else if let Some(prefix) = prefix.get_ident() { + let prefix_str = prefix.to_string(); + + (quote!(#prefix_str), quote!(), quote!()) + } else { + return Err(Error::new_spanned( + prefix, + "If there are no generics, the prefix is only allowed to be an identifier.", + )) + }, + PrefixType::Verbatim => { + let prefix_str = match prefix.get_ident() { + Some(p) => p.to_string(), + None => + return Err(Error::new_spanned( + prefix, + "Prefix type `verbatim` requires that the prefix is an ident.", + )), + }; + + (quote!(#prefix_str), quote!(), quote!()) + }, + PrefixType::PalletName => { + let type_generics = impl_generics_used_by_prefix.iter().map(|g| &g.ident); + let impl_generics = impl_generics_used_by_prefix.iter(); ( quote! { - <#prefix < #( #type_generics2 ),* > as #crate_::traits::PalletInfoAccess>::name() + <#prefix as #crate_::traits::PalletInfoAccess>::name() }, quote!( #( #impl_generics ),* ), quote!( #( #type_generics ),* ), ) - } else if let Some(prefix) = prefix.get_ident() { - let prefix_str = prefix.to_string(); + }, + PrefixType::Dynamic => { + let type_generics = impl_generics_used_by_prefix.iter().map(|g| &g.ident); + let impl_generics = impl_generics_used_by_prefix.iter(); - (quote!(#prefix_str), quote!(), quote!()) - } else { - return Err(Error::new_spanned( - prefix, - "If there are no generics, the prefix is only allowed to be an identifier.", - )) - }; + ( + quote! 
{ + <#prefix as #crate_::traits::Get<_>>::get() + }, + quote!( #( #impl_generics ),* ), + quote!( #( #type_generics ),* ), + ) + }, + }; let where_clause = storage_where_clause.map(|w| quote!(#w)).unwrap_or_default(); @@ -644,5 +663,5 @@ fn generate_storage_instance( #counter_code }; - Ok(StorageInstance { name, code }) + Ok(StorageInstance { name, code, generics: quote!( < #type_generics > ) }) } diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml index bf828b01b6f76..7caff534fc1f9 100644 --- a/frame/support/procedural/tools/Cargo.toml +++ b/frame/support/procedural/tools/Cargo.toml @@ -14,6 +14,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" -quote = "1.0.26" -syn = { version = "2.0.14", features = ["full", "visit", "extra-traits"] } +quote = "1.0.28" +syn = { version = "2.0.16", features = ["full", "visit", "extra-traits"] } frame-support-procedural-tools-derive = { version = "3.0.0", path = "./derive" } diff --git a/frame/support/procedural/tools/derive/Cargo.toml b/frame/support/procedural/tools/derive/Cargo.toml index 81b5a8487449b..193df53f12917 100644 --- a/frame/support/procedural/tools/derive/Cargo.toml +++ b/frame/support/procedural/tools/derive/Cargo.toml @@ -16,5 +16,5 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" -quote = { version = "1.0.26", features = ["proc-macro"] } -syn = { version = "2.0.14", features = ["proc-macro", "full", "extra-traits", "parsing"] } +quote = { version = "1.0.28", features = ["proc-macro"] } +syn = { version = "2.0.16", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/frame/support/src/crypto/ecdsa.rs b/frame/support/src/crypto/ecdsa.rs index afa45e296d3fd..4d50b51049c90 100644 --- a/frame/support/src/crypto/ecdsa.rs +++ b/frame/support/src/crypto/ecdsa.rs @@ -47,3 +47,19 @@ impl ECDSAExt for Public { }) } } + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::{ecdsa, 
Pair}; + + #[test] + fn to_eth_address_works() { + let pair = ecdsa::Pair::from_string("//Alice//password", None).unwrap(); + let eth_address = pair.public().to_eth_address().unwrap(); + assert_eq!( + array_bytes::bytes2hex("0x", ð_address), + "0xdc1cce4263956850a3c8eb349dc6fc3f7792cb27" + ); + } +} diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 071fb7c9d5aaf..e462066a84605 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -89,6 +89,26 @@ impl From> for RawOrigin { } } +impl RawOrigin { + /// Returns `Some` with a reference to the `AccountId` if `self` is `Signed`, `None` otherwise. + pub fn as_signed(&self) -> Option<&AccountId> { + match &self { + Self::Signed(x) => Some(x), + _ => None, + } + } + + /// Returns `true` if `self` is `Root`, `None` otherwise. + pub fn is_root(&self) -> bool { + matches!(&self, Self::Root) + } + + /// Returns `true` if `self` is `None`, `None` otherwise. + pub fn is_none(&self) -> bool { + matches!(&self, Self::None) + } +} + /// A type that can be used as a parameter in a dispatchable function. /// /// When using `decl_module` all arguments for call functions must implement this trait. @@ -624,2936 +644,27 @@ impl PaysFee for (u64, Pays) { // END TODO -/// Declares a `Module` struct and a `Call` enum, which implements the dispatch logic. -/// -/// ## Declaration -/// -/// ``` -/// # #[macro_use] -/// # extern crate frame_support; -/// # use frame_support::dispatch; -/// # use frame_system::{Config, ensure_signed}; -/// decl_module! { -/// pub struct Module for enum Call where origin: T::RuntimeOrigin { -/// -/// // Private functions are dispatchable, but not available to other -/// // FRAME pallets. -/// #[weight = 0] -/// fn my_function(origin, var: u64) -> dispatch::DispatchResult { -/// // Your implementation -/// Ok(()) -/// } -/// -/// // Public functions are both dispatchable and available to other -/// // FRAME pallets. 
-/// #[weight = 0] -/// pub fn my_public_function(origin) -> dispatch::DispatchResult { -/// // Your implementation -/// Ok(()) -/// } -/// } -/// } -/// # fn main() {} -/// ``` -/// -/// The declaration is set with the header where: -/// -/// * `Module`: The struct generated by the macro, with type `Config`. -/// * `Call`: The enum generated for every pallet, which implements -/// [`Callable`](./dispatch/trait.Callable.html). -/// * `origin`: Alias of `T::RuntimeOrigin`. -/// * `Result`: The expected return type from pallet functions. -/// -/// The first parameter of dispatchable functions must always be `origin`. -/// -/// ### Shorthand Example -/// -/// The macro automatically expands a shorthand function declaration to return the -/// [`DispatchResult`] type. These functions are the same: -/// -/// ``` -/// # #[macro_use] -/// # extern crate frame_support; -/// # use frame_support::dispatch; -/// # use frame_system::{Config, ensure_signed}; -/// decl_module! { -/// pub struct Module for enum Call where origin: T::RuntimeOrigin { -/// #[weight = 0] -/// fn my_long_function(origin) -> dispatch::DispatchResult { -/// // Your implementation -/// Ok(()) -/// } -/// -/// #[weight = 0] -/// fn my_short_function(origin) { -/// // Your implementation -/// } -/// } -/// } -/// # fn main() {} -/// ``` -/// -/// ### Consuming only portions of the annotated static weight -/// -/// Per default a callable function consumes all of its static weight as declared via -/// the #\[weight\] attribute. However, there are use cases where only a portion of this -/// weight should be consumed. In that case the static weight is charged pre dispatch and -/// the difference is refunded post dispatch. -/// -/// In order to make use of this feature the function must return `DispatchResultWithPostInfo` -/// in place of the default `DispatchResult`. Then the actually consumed weight can be returned. 
-/// To consume a non default weight while returning an error -/// [`WithPostDispatchInfo::with_weight`](./weight/trait.WithPostDispatchInfo.html) can be used -/// to augment any error with custom weight information. -/// -/// ``` -/// # #[macro_use] -/// # extern crate frame_support; -/// # use frame_support::{weights::Weight, dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo, PostDispatchInfo}}; -/// # use frame_system::{Config, ensure_signed}; -/// decl_module! { -/// pub struct Module for enum Call where origin: T::RuntimeOrigin { -/// #[weight = 1_000_000] -/// fn my_long_function(origin, do_expensive_calc: bool) -> DispatchResultWithPostInfo { -/// ensure_signed(origin).map_err(|e| e.with_weight(Weight::from_parts(100_000, 0)))?; -/// if do_expensive_calc { -/// // do the expensive calculation -/// // ... -/// // return None to indicate that we are using all weight (the default) -/// return Ok(None::.into()); -/// } -/// // expensive calculation not executed: use only a portion of the weight -/// Ok(PostDispatchInfo { actual_weight: Some(Weight::from_parts(100_000, 0)), ..Default::default() }) -/// } -/// } -/// } -/// # fn main() {} -/// ``` -/// -/// ### Transactional Function Example -/// -/// Transactional function discards all changes to storage if it returns `Err`, or commits if -/// `Ok`, via the #\[transactional\] attribute. Note the attribute must be after #\[weight\]. -/// The #\[transactional\] attribute is deprecated since it is the default behaviour. -/// -/// ``` -/// # #[macro_use] -/// # extern crate frame_support; -/// # use frame_support::transactional; -/// # use frame_system::Config; -/// decl_module! 
{ -/// pub struct Module for enum Call where origin: T::RuntimeOrigin { -/// #[weight = 0] -/// #[transactional] -/// fn my_short_function(origin) { -/// // Your implementation -/// } -/// } -/// } -/// # fn main() {} -/// ``` -/// -/// ### Privileged Function Example -/// -/// A privileged function checks that the origin of the call is `ROOT`. -/// -/// ``` -/// # #[macro_use] -/// # extern crate frame_support; -/// # use frame_support::dispatch; -/// # use frame_system::{Config, ensure_signed, ensure_root}; -/// decl_module! { -/// pub struct Module for enum Call where origin: T::RuntimeOrigin { -/// #[weight = 0] -/// fn my_privileged_function(origin) -> dispatch::DispatchResult { -/// ensure_root(origin)?; -/// // Your implementation -/// Ok(()) -/// } -/// } -/// } -/// # fn main() {} -/// ``` -/// -/// ### Attributes on Functions -/// -/// Attributes on functions are supported, but must be in the order of: -/// 1. Optional #\[doc\] attribute. -/// 2. #\[weight\] attribute. -/// 3. Optional function attributes, for instance #\[transactional\]. Those function attributes will -/// be written only on the dispatchable functions implemented on `Module`, not on the `Call` enum -/// variant. -/// -/// ## Multiple Module Instances Example -/// -/// A Substrate module can be built such that multiple instances of the same module can be used -/// within a single runtime. For example, the [Balances module](../pallet_balances/index.html) can -/// be added multiple times to your runtime in order to support multiple, independent currencies for -/// your blockchain. 
Here is an example of how you would declare such a module using the -/// `decl_module!` macro: -/// -/// ``` -/// # #[macro_use] -/// # extern crate frame_support; -/// # use frame_support::dispatch; -/// # use frame_system::ensure_signed; -/// # pub struct DefaultInstance; -/// # pub trait Instance: 'static {} -/// # impl Instance for DefaultInstance {} -/// pub trait Config: frame_system::Config {} -/// -/// decl_module! { -/// pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::RuntimeOrigin { -/// // Your implementation -/// } -/// } -/// # fn main() {} -/// ``` -/// -/// Note: `decl_storage` must be called to generate `Instance` trait and optionally -/// `DefaultInstance` type. -/// -/// ## Where clause -/// -/// Besides the default `origin: T::RuntimeOrigin`, you can also pass other bounds to the module -/// declaration. This where bound will be replicated to all types generated by this macro. The -/// chaining of multiple trait bounds with `+` is not supported. If multiple bounds for one type are -/// required, it needs to be split up into multiple bounds. -/// -/// ``` -/// # #[macro_use] -/// # extern crate frame_support; -/// # use frame_support::dispatch; -/// # use frame_system::{self as system, ensure_signed}; -/// pub trait Config: system::Config where Self::AccountId: From {} -/// -/// decl_module! { -/// pub struct Module for enum Call where origin: T::RuntimeOrigin, T::AccountId: From { -/// // Your implementation -/// } -/// } -/// # fn main() {} -/// ``` -/// -/// ## Reserved Functions -/// -/// The following are reserved function signatures: -/// -/// * `deposit_event`: Helper function for depositing an [event](https://docs.substrate.io/main-docs/build/events-errors/). -/// The default behavior is to call `deposit_event` from the [System -/// module](../frame_system/index.html). However, you can write your own implementation for events -/// in your runtime. 
To use the default behavior, add `fn deposit_event() = default;` to your -/// `Module`. -/// -/// The following reserved functions also take the block number (with type `T::BlockNumber`) as an -/// optional input: -/// -/// * `on_runtime_upgrade`: Executes at the beginning of a block prior to on_initialize when there -/// is a runtime upgrade. This allows each module to upgrade its storage before the storage items -/// are used. As such, **calling other modules must be avoided**!! Using this function will -/// implement the [`OnRuntimeUpgrade`](../sp_runtime/traits/trait.OnRuntimeUpgrade.html) trait. -/// Function signature must be `fn on_runtime_upgrade() -> frame_support::weights::Weight`. -/// -/// * `on_initialize`: Executes at the beginning of a block. Using this function will -/// implement the [`OnInitialize`](./trait.OnInitialize.html) trait. -/// Function signature can be either: -/// * `fn on_initialize(n: BlockNumber) -> frame_support::weights::Weight` or -/// * `fn on_initialize() -> frame_support::weights::Weight` -/// -/// * `on_idle`: Executes at the end of a block. Passes a remaining weight to provide a threshold -/// for when to execute non vital functions. Using this function will implement the -/// [`OnIdle`](./traits/trait.OnIdle.html) trait. -/// Function signature is: -/// * `fn on_idle(n: BlockNumber, remaining_weight: Weight) -> frame_support::weights::Weight` -/// -/// * `on_finalize`: Executes at the end of a block. Using this function will -/// implement the [`OnFinalize`](./traits/trait.OnFinalize.html) trait. -/// Function signature can be either: -/// * `fn on_finalize(n: BlockNumber) -> frame_support::weights::Weight` or -/// * `fn on_finalize() -> frame_support::weights::Weight` -/// -/// * `offchain_worker`: Executes at the beginning of a block and produces extrinsics for a future -/// block upon completion. Using this function will implement the -/// [`OffchainWorker`](./traits/trait.OffchainWorker.html) trait. 
-/// * `integrity_test`: Executes in a test generated by `construct_runtime`, note it doesn't execute -/// in an externalities-provided environment. Implement -/// [`IntegrityTest`](./trait.IntegrityTest.html) trait. -#[macro_export] -#[deprecated(note = "Will be removed soon; use the attribute `#[pallet]` macro instead. - For more info, see: ")] -macro_rules! decl_module { - // Entry point #1. - ( - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident - $( , I: $instantiable:path $( = $module_default_instance:path )? )? - > - for enum $call_type:ident where origin: $origin_type:ty $(, $where_ty:ty: $where_bound:path )* $(,)? { - $( $t:tt )* - } - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type< - $trait_instance: $trait_name $(, I: $instantiable $(= $module_default_instance)?)? - > - for enum $call_type where origin: $origin_type, system = frame_system - { $( $where_ty: $where_bound ),* } - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - [] - $($t)* - ); - }; - // Entry point #2. - ( - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident - $( , I: $instantiable:path $( = $module_default_instance:path )? )? - > - for enum $call_type:ident where - origin: $origin_type:ty, - system = $system:ident - $(, $where_ty:ty: $where_bound:path )* - $(,)? - { - $($t:tt)* - } - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type< - $trait_instance: $trait_name $(, I: $instantiable $( = $module_default_instance )? )? - > - for enum $call_type where origin: $origin_type, system = $system - { $( $where_ty: $where_bound ),* } - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - [] - $($t)* - ); - }; - - // Normalization expansions. Fills the defaults. 
- (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?> - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - {} - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - $vis:vis fn deposit_event() = default; - $($rest:tt)* - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type<$trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)?> - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $vis fn deposit_event() = default; } - { $( $on_initialize )* } - { $( $on_runtime_upgrade )* } - { $( $on_idle )* } - { $( $on_finalize )* } - { $( $offchain )* } - { $( $constants )* } - { $( $error_type )* } - { $( $integrity_test )* } - { $( $storage_version )* } - [ $( $dispatchables )* ] - $($rest)* - ); - }; - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?> - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - {} - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - $vis:vis fn deposit_event - $($rest:tt)* - ) => { - compile_error!( - "`deposit_event` function is reserved and must follow the syntax: `$vis:vis 
fn deposit_event() = default;`" - ); - }; - // Compile error on `deposit_event` being added a second time. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? - > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )+ } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - $vis:vis fn deposit_event() = default; - $($rest:tt)* - ) => { - compile_error!("`deposit_event` can only be passed once as input."); - }; - // Add on_finalize - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?> - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - {} - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - fn on_finalize( $( $param_name:ident : $param:ty ),* $(,)? 
) { $( $impl:tt )* } - $($rest:tt)* - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type<$trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)?> - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { $( $on_runtime_upgrade )* } - { $( $on_idle )* } - { - fn on_finalize( $( $param_name : $param ),* ) { $( $impl )* } - } - { $( $offchain )* } - { $( $constants )* } - { $( $error_type )* } - { $( $integrity_test )* } - { $( $storage_version )* } - [ $( $dispatchables )* ] - $($rest)* - ); - }; - // compile_error on_finalize, given weight removed syntax. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?> - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - {} - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - #[weight = $weight:expr] - fn on_finalize( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!( - "`on_finalize` can't be given weight attribute anymore, weight must be returned by \ - `on_initialize` or `on_runtime_upgrade` instead" - ); - }; - // Compile error on `on_finalize` being added a second time. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? 
- > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )+ } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - #[weight = $weight:expr] - fn on_finalize( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!("`on_finalize` can only be passed once as input."); - }; - - // Add on_idle - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?> - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - {} - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - fn on_idle($param_name1:ident : $param1:ty, $param_name2:ident: $param2:ty $(,)? 
) -> $return:ty { $( $impl:tt )* } - $($rest:tt)* - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type<$trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)?> - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { $( $on_runtime_upgrade )* } - { - fn on_idle( $param_name1: $param1, $param_name2: $param2 ) -> $return { $( $impl )* } - } - { $( $on_finalize:tt )* } - { $( $offchain )* } - { $( $constants )* } - { $( $error_type )* } - { $( $integrity_test )* } - { $( $storage_version )* } - [ $( $dispatchables )* ] - $($rest)* - ); - }; - // compile_error for invalid on_idle function signature in decl_module - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? - > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - $(#[weight = $weight:expr])? - fn on_idle - $($rest:tt)* - ) => { - compile_error!("`on_idle` method is reserved and syntax doesn't match expected syntax."); - }; - - // compile_error on_runtime_upgrade, without a given weight removed syntax. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? 
- > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - {} - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - fn on_runtime_upgrade( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!( - "`on_runtime_upgrade` must return Weight, signature has changed." - ); - }; - // compile_error on_runtime_upgrade, given weight removed syntax. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? - > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - {} - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - #[weight = $weight:expr] - fn on_runtime_upgrade( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!( - "`on_runtime_upgrade` can't be given weight attribute anymore, weight must be returned \ - by the function directly." - ); - }; - // Add on_runtime_upgrade - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? 
- > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - {} - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - fn on_runtime_upgrade( $( $param_name:ident : $param:ty ),* $(,)? ) -> $return:ty { $( $impl:tt )* } - $($rest:tt)* - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type<$trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)?> - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { - fn on_runtime_upgrade( $( $param_name : $param ),* ) -> $return { $( $impl )* } - } - { $( $on_idle )* } - { $( $on_finalize )* } - { $( $offchain )* } - { $( $constants )* } - { $( $error_type )* } - { $( $integrity_test )* } - { $( $storage_version )* } - [ $( $dispatchables )* ] - $($rest)* - ); - }; - // Compile error on `on_runtime_upgrade` being added a second time. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? - > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )+ } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - fn on_runtime_upgrade( $( $param_name:ident : $param:ty ),* $(,)? 
) -> $return:ty { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!("`on_runtime_upgrade` can only be passed once as input."); - }; - // Add integrity_test - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? - > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - {} - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - fn integrity_test() { $( $impl:tt )* } - $($rest:tt)* - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type<$trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)?> - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { $( $on_runtime_upgrade )* } - { $( $on_idle )* } - { $( $on_finalize )* } - { $( $offchain )* } - { $( $constants )* } - { $( $error_type )* } - { - $(#[doc = $doc_attr])* - fn integrity_test() { $( $impl)* } - } - { $( $storage_version )* } - [ $( $dispatchables )* ] - $($rest)* - ); - }; - // Compile error on `integrity_test` being added a second time. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? 
- > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )+ } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - fn integrity_test() { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!("`integrity_test` can only be passed once as input."); - }; - // compile_error on_initialize, without a given weight removed syntax. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? - > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - {} - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - fn on_initialize( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!( - "`on_initialize` must return Weight, signature has changed." - ); - }; - // compile_error on_initialize, with given weight removed syntax. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? 
- > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - {} - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - #[weight = $weight:expr] - fn on_initialize( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!( - "`on_initialize` can't be given weight attribute anymore, weight must be returned \ - by the function directly." - ); - }; - // Add on_initialize - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? - > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - {} - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - fn on_initialize( $( $param_name:ident : $param:ty ),* $(,)? 
) -> $return:ty { $( $impl:tt )* } - $($rest:tt)* - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type<$trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)?> - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { - fn on_initialize( $( $param_name : $param ),* ) -> $return { $( $impl )* } - } - { $( $on_runtime_upgrade )* } - { $( $on_idle )* } - { $( $on_finalize )* } - { $( $offchain )* } - { $( $constants )* } - { $( $error_type )* } - { $( $integrity_test )* } - { $( $storage_version )* } - [ $( $dispatchables )* ] - $($rest)* - ); - }; - // Compile error on trying to add a second `on_initialize`. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? - > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )+ } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - fn on_initialize( $( $param_name:ident : $param:ty ),* $(,)? ) -> $return:ty { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!("`on_initialize` can only be passed once as input."); - }; - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident - $(, I: $instantiable:path $(= $module_default_instance:path)?)? 
- > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - fn offchain_worker( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } - $($rest:tt)* - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type< - $trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)? - > - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { $( $on_runtime_upgrade )* } - { $( $on_idle )* } - { $( $on_finalize )* } - { fn offchain_worker( $( $param_name : $param ),* ) { $( $impl )* } } - { $( $constants )* } - { $( $error_type )* } - { $( $integrity_test )* } - { $( $storage_version )* } - [ $( $dispatchables )* ] - $($rest)* - ); - }; - // Compile error on trying to add a second `offchain_worker`. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? - > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )+ } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - fn offchain_worker( $( $param_name:ident : $param:ty ),* $(,)? 
) -> $return:ty { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!("`offchain_worker` can only be passed once as input."); - }; - // This puts a constant in the parsed constants list. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident - $(, $instance:ident: $instantiable:path $(= $module_default_instance:path)?)? - > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $( #[doc = $doc_attr:tt] )* - const $name:ident: $ty:ty = $value:expr; - $( $rest:tt )* - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type< - $trait_instance: $trait_name - $( , $instance: $instantiable $(= $module_default_instance)? )? - > - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { $( $on_runtime_upgrade )* } - { $( $on_idle )* } - { $( $on_finalize )* } - { $( $offchain )* } - { - $( $constants )* - $( #[doc = $doc_attr ] )* - $name: $ty = $value; - } - { $( $error_type )* } - { $( $integrity_test )* } - { $( $storage_version )* } - [ $( $dispatchables )* ] - $($rest)* - ); - }; - - // Parse error type - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: - $trait_name:ident$(, $instance:ident: $instantiable:path $(= $module_default_instance:path)?)? 
- > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - type Error = $error_type:ty; - $($rest:tt)* - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type< - $trait_instance: $trait_name$(, $instance: $instantiable $(= $module_default_instance)?)? - > - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { $( $on_runtime_upgrade )* } - { $( $on_idle )* } - { $( $on_finalize )* } - { $( $offchain )* } - { $( $constants )* } - { $error_type } - { $( $integrity_test )* } - { $( $storage_version )* } - [ $( $dispatchables )* ] - $($rest)* - ); - }; - // Add default Error if none supplied - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: - $trait_name:ident$(, $instance:ident: $instantiable:path $(= $module_default_instance:path)?)? - > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $($t:tt)* ] - $($rest:tt)* - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type< - $trait_instance: $trait_name$(, $instance: $instantiable $(= $module_default_instance)?)? 
- > - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { $( $on_runtime_upgrade )* } - { $( $on_idle )* } - { $( $on_finalize )* } - { $( $offchain )* } - { $( $constants )* } - { __NO_ERROR_DEFINED } - { $( $integrity_test )* } - { $( $storage_version )* } - [ $($t)* ] - $($rest)* - ); - }; - - // Parse storage version - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: - $trait_name:ident$(, $instance:ident: $instantiable:path $(= $module_default_instance:path)?)? - > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - type StorageVersion = $storage_version:path; - $($rest:tt)* - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type< - $trait_instance: $trait_name$(, $instance: $instantiable $(= $module_default_instance)?)? - > - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { $( $on_runtime_upgrade )* } - { $( $on_idle )* } - { $( $on_finalize )* } - { $( $offchain )* } - { $( $constants )* } - { $( $error_type )* } - { $( $integrity_test)* } - { $storage_version } - [ $( $dispatchables )* ] - $($rest)* - ); - }; - - // This puts the function statement into the [], decreasing `$rest` and moving toward finishing the parse. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident - $(, $instance:ident: $instantiable:path $(= $module_default_instance:path)?)? 
- > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - #[weight = $weight:expr] - $(#[$fn_attr:meta])* - $fn_vis:vis fn $fn_name:ident( - $origin:ident $( , $(#[$codec_attr:ident])* $param_name:ident : $param:ty )* $(,)? - ) $( -> $result:ty )* { $( $impl:tt )* } - $($rest:tt)* - ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type< - $trait_instance: $trait_name$(, $instance: $instantiable $(= $module_default_instance)?)? - > - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { $( $on_runtime_upgrade )* } - { $( $on_idle )* } - { $( $on_finalize )* } - { $( $offchain )* } - { $( $constants )* } - { $( $error_type )* } - { $( $integrity_test)* } - { $( $storage_version )* } - [ - $( $dispatchables )* - $(#[doc = $doc_attr])* - #[weight = $weight] - $(#[$fn_attr])* - $fn_vis fn $fn_name( - $origin $( , $(#[$codec_attr])* $param_name : $param )* - ) $( -> $result )* { $( $impl )* } - { $($instance: $instantiable)? } - ] - $($rest)* - ); - }; - // Add #[weight] if none is defined. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: - $trait_name:ident$(, $instance:ident: $instantiable:path $(= $module_default_instance:path)?)? 
- > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - $(#[$fn_attr:meta])* - $fn_vis:vis fn $fn_name:ident( - $from:ident $( , $( #[$codec_attr:ident] )* $param_name:ident : $param:ty )* $(,)? - ) $( -> $result:ty )* { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!(concat!( - "Missing weight for ", stringify!($ident), - ". Every dispatchable must have a #[weight] attribute." - ) - ); - }; - // Ignore any ident which is not `origin` with type `T::RuntimeOrigin`. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?> - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - $(#[weight = $weight:expr])? - $(#[$fn_attr:meta])* - $fn_vis:vis fn $fn_name:ident( - $origin:ident : T::RuntimeOrigin $( , $( #[$codec_attr:ident] )* $param_name:ident : $param:ty )* $(,)? - ) $( -> $result:ty )* { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!( - "First parameter of dispatch should be marked `origin` only, with no type specified \ - (a bit like `self`)." 
- ); - }; - // Ignore any ident which is `origin` but has a type, regardless of the type token itself. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?> - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - $(#[weight = $weight:expr])? - $(#[$fn_attr:meta])* - $fn_vis:vis fn $fn_name:ident( - origin : $origin:ty $( , $( #[$codec_attr:ident] )* $param_name:ident : $param:ty )* $(,)? - ) $( -> $result:ty )* { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!( - "First parameter of dispatch should be marked `origin` only, with no type specified \ - (a bit like `self`)." - ); - }; - // Ignore any function missing `origin` as the first parameter. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path $(= $module_default_instance:path)?)?> - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - $(#[doc = $doc_attr:tt])* - $(#[weight = $weight:expr])? - $(#[$fn_attr:meta])* - $fn_vis:vis fn $fn_name:ident( - $( $(#[$codec_attr:ident])* $param_name:ident : $param:ty ),* $(,)? 
- ) $( -> $result:ty )* { $( $impl:tt )* } - $($rest:tt)* - ) => { - compile_error!( - "Implicit conversion to privileged function has been removed. \ - First parameter of dispatch should be marked `origin`. \ - For root-matching dispatch, also add `ensure_root(origin)?`." - ); - }; - // Last normalize step. Triggers `@imp` expansion which is the real expansion. - (@normalize - $(#[$attr:meta])* - pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?> - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - [ $( $dispatchables:tt )* ] - ) => { - $crate::decl_module!(@imp - $(#[$attr])* - pub struct $mod_type<$trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)?> - for enum $call_type where origin: $origin_type, system = $system { - $( $dispatchables )* - } - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { $( $on_runtime_upgrade )* } - { $( $on_idle )* } - { $( $on_finalize )* } - { $( $offchain )* } - { $( $constants )* } - { $( $error_type )* } - { $( $integrity_test )* } - { $( $storage_version )* } - ); - }; - - // Implementation of Call enum's .dispatch() method. - // TODO: this probably should be a different macro? - - (@call - $ignore:ident - $mod_type:ident<$trait_instance:ident $(, $instance:ident)?> $fn_name:ident $origin:ident $system:ident [ $( $param_name:ident),* ] - ) => { - // We execute all dispatchable in a new storage layer, allowing them - // to return an error at any point, and undoing any storage changes. 
- $crate::storage::with_storage_layer(|| { - <$mod_type<$trait_instance $(, $instance)?>>::$fn_name( $origin $(, $param_name )* ).map(Into::into).map_err(Into::into) - }) - }; - - // no `deposit_event` function wanted - (@impl_deposit_event - $module:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path)?>; - $system:ident; - { $( $other_where_bounds:tt )* } - ) => {}; - - (@impl_deposit_event - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - $system:ident; - { $( $other_where_bounds:tt )* } - $vis:vis fn deposit_event$(<$event_trait_instance:ident $(, $event_instance:ident)?>)?() = default; - ) => { - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> $module<$trait_instance $(, $instance)?> - where $( $other_where_bounds )* - { - /// Deposits an event using `frame_system::Pallet::deposit_event`. - $vis fn deposit_event( - event: impl Into<< $trait_instance as $trait_name $(<$instance>)? >::RuntimeEvent> - ) { - <$system::Pallet<$trait_instance>>::deposit_event(event.into()) - } - } - }; - - (@impl_on_initialize - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - fn on_initialize() -> $return:ty { $( $impl:tt )* } - ) => { - impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<<$trait_instance as $system::Config>::BlockNumber> - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - { - fn on_initialize(_block_number_not_used: <$trait_instance as $system::Config>::BlockNumber) -> $return { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_initialize")); - { $( $impl )* } - } - } - }; - - (@impl_on_initialize - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - fn 
on_initialize($param:ident : $param_ty:ty) -> $return:ty { $( $impl:tt )* } - ) => { - impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<<$trait_instance as $system::Config>::BlockNumber> - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - { - fn on_initialize($param: $param_ty) -> $return { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_initialize")); - { $( $impl )* } - } - } - }; - - (@impl_on_initialize - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - ) => { - impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<<$trait_instance as $system::Config>::BlockNumber> - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - {} - }; - - (@impl_try_state_default - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - ) => { - #[cfg(feature = "try-runtime")] - impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> - $crate::traits::TryState<<$trait_instance as $system::Config>::BlockNumber> - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - { - fn try_state( - _: <$trait_instance as $system::Config>::BlockNumber, - _: $crate::traits::TryStateSelect, - ) -> Result<(), &'static str> { - let pallet_name = << - $trait_instance - as - $system::Config - >::PalletInfo as $crate::traits::PalletInfo>::name::().unwrap_or(""); - $crate::log::debug!( - target: $crate::LOG_TARGET, - "⚠️ pallet {} cannot have try-state because it is using decl_module!", - pallet_name, - ); - Ok(()) - } - } - }; - - (@impl_on_runtime_upgrade - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: 
$instantiable:path)?>; - { $( $other_where_bounds:tt )* } - fn on_runtime_upgrade() -> $return:ty { $( $impl:tt )* } - ) => { - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnRuntimeUpgrade - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - { - fn on_runtime_upgrade() -> $return { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_runtime_upgrade")); - let pallet_name = << - $trait_instance - as - $system::Config - >::PalletInfo as $crate::traits::PalletInfo>::name::().unwrap_or(""); - - $crate::log::info!( - target: $crate::LOG_TARGET, - "⚠️ {} declares internal migrations (which *might* execute). \ - On-chain `{:?}` vs current storage version `{:?}`", - pallet_name, - ::on_chain_storage_version(), - ::current_storage_version(), - ); - - { $( $impl )* } - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<$crate::sp_std::vec::Vec, &'static str> { - Ok($crate::sp_std::vec::Vec::new()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_: $crate::sp_std::vec::Vec) -> Result<(), &'static str> { - Ok(()) - } - } - }; - - (@impl_on_runtime_upgrade - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - ) => { - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnRuntimeUpgrade - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - { - fn on_runtime_upgrade() -> $crate::dispatch::Weight { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_runtime_upgrade")); - let pallet_name = << - $trait_instance - as - $system::Config - >::PalletInfo as $crate::traits::PalletInfo>::name::().unwrap_or(""); - - $crate::log::debug!( - target: $crate::LOG_TARGET, - "✅ no migration for {}", - pallet_name, - ); - - $crate::dispatch::Weight::zero() - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> 
Result<$crate::sp_std::vec::Vec, &'static str> { - Ok($crate::sp_std::vec::Vec::new()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_: $crate::sp_std::vec::Vec) -> Result<(), &'static str> { - Ok(()) - } - } - }; - - (@impl_integrity_test - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - $(#[doc = $doc_attr:tt])* - fn integrity_test() { $( $impl:tt )* } - ) => { - #[cfg(feature = "std")] - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::traits::IntegrityTest - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - { - $(#[doc = $doc_attr])* - fn integrity_test() { - $( $impl )* - } - } - }; - - (@impl_integrity_test - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - ) => { - #[cfg(feature = "std")] - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::traits::IntegrityTest - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - {} - }; - - (@impl_on_finalize - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - fn on_finalize() { $( $impl:tt )* } - ) => { - impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<<$trait_instance as $system::Config>::BlockNumber> - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - { - fn on_finalize(_block_number_not_used: <$trait_instance as $system::Config>::BlockNumber) { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_finalize")); - { $( $impl )* } - } - } - }; - - (@impl_on_finalize - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - fn 
on_finalize($param:ident : $param_ty:ty) { $( $impl:tt )* } - ) => { - impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<<$trait_instance as $system::Config>::BlockNumber> - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - { - fn on_finalize($param: $param_ty) { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_finalize")); - { $( $impl )* } - } - } - }; - - (@impl_on_finalize - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - ) => { - impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<<$trait_instance as $system::Config>::BlockNumber> - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - { - } - }; - - (@impl_on_idle - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - fn on_idle($param1:ident : $param1_ty:ty, $param2:ident: $param2_ty:ty) -> $return:ty { $( $impl:tt )* } - ) => { - impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnIdle<<$trait_instance as $system::Config>::BlockNumber> - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - { - fn on_idle($param1: $param1_ty, $param2: $param2_ty) -> $return { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_idle")); - { $( $impl )* } - } - } - }; - - (@impl_on_idle - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - ) => { - impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnIdle<<$trait_instance as $system::Config>::BlockNumber> - for $module<$trait_instance$(, 
$instance)?> where $( $other_where_bounds )* - { - } - }; - - (@impl_offchain - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - fn offchain_worker() { $( $impl:tt )* } - ) => { - impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<<$trait_instance as $system::Config>::BlockNumber> - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - { - fn offchain_worker(_block_number_not_used: <$trait_instance as $system::Config>::BlockNumber) { $( $impl )* } - } - }; - - (@impl_offchain - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - fn offchain_worker($param:ident : $param_ty:ty) { $( $impl:tt )* } - ) => { - impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<<$trait_instance as $system::Config>::BlockNumber> - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - { - fn offchain_worker($param: $param_ty) { $( $impl )* } - } - }; - - (@impl_offchain - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - ) => { - impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<<$trait_instance as $system::Config>::BlockNumber> - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - {} - }; - - // Expansion for _origin_ dispatch functions with no return type. 
- (@impl_function - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - $origin_ty:ty; - $ignore:ident; - $(#[$fn_attr:meta])* - $vis:vis fn $name:ident ( - $origin:ident $(, $param:ident : $param_ty:ty )* - ) { $( $impl:tt )* } - ) => { - #[allow(unreachable_code)] - $(#[$fn_attr])* - $vis fn $name( - $origin: $origin_ty $(, $param: $param_ty )* - ) -> $crate::dispatch::DispatchResult { - $crate::storage::with_storage_layer(|| { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); - { $( $impl )* } - Ok(()) - }) - } - }; - - // Expansion for _origin_ dispatch functions with explicit return type. - (@impl_function - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - $origin_ty:ty; - $ignore:ident; - $(#[$fn_attr:meta])* - $vis:vis fn $name:ident ( - $origin:ident $(, $param:ident : $param_ty:ty )* - ) -> $result:ty { $( $impl:tt )* } - ) => { - $(#[$fn_attr])* - $vis fn $name($origin: $origin_ty $(, $param: $param_ty )* ) -> $result { - $crate::storage::with_storage_layer(|| { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); - $( $impl )* - }) - } - }; - - // Declare a `Call` variant parameter that should be encoded `compact`. - (@create_call_enum - $call_type:ident; - <$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path $(= $module_default_instance:path)?)?> - { $( $other_where_bounds:tt )* } - { $( $generated_variants:tt )* } - { $( $current_params:tt )* } - variant $fn_name:ident; - $( #[doc = $doc_attr:tt] )* - #[compact] - $name:ident : $type:ty; - $( $rest:tt )* - ) => { - $crate::decl_module! { - @create_call_enum - $call_type; - <$trait_instance: $trait_name $(, $instance: $instantiable $(= $module_default_instance)? 
)?> - { $( $other_where_bounds )* } - { $( $generated_variants )* } - { - $( $current_params )* - #[codec(compact)] - $name: $type, - } - variant $fn_name; - $( #[doc = $doc_attr] )* - $( $rest )* - } - }; - - // Declare a `Call` variant parameter. - (@create_call_enum - $call_type:ident; - <$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path $(= $module_default_instance:path)?)?> - { $( $other_where_bounds:tt )* } - { $( $generated_variants:tt )* } - { $( $current_params:tt )* } - variant $fn_name:ident; - $(#[doc = $doc_attr:tt])* - $name:ident : $type:ty; - $( $rest:tt )* - ) => { - $crate::decl_module! { - @create_call_enum - $call_type; - <$trait_instance: $trait_name $(, $instance: $instantiable $(= $module_default_instance)? )?> - { $( $other_where_bounds )* } - { $( $generated_variants )* } - { - $( $current_params )* - $name: $type, - } - variant $fn_name; - $( #[doc = $doc_attr] )* - $( $rest )* - } - }; - - (@create_call_enum - $call_type:ident; - <$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path $(= $module_default_instance:path)?)?> - { $( $other_where_bounds:tt )* } - { $( $generated_variants:tt )* } - { $( $current_params:tt )* } - variant $fn_name:ident; - $(#[doc = $doc_attr:tt])* - $( - variant $next_fn_name:ident; - $( $rest:tt )* - )? - ) => { - $crate::decl_module! { - @create_call_enum - $call_type; - <$trait_instance: $trait_name $(, $instance: $instantiable $(= $module_default_instance)? )?> - { $( $other_where_bounds )* } - { - $( $generated_variants )* - #[allow(non_camel_case_types)] - $(#[doc = $doc_attr])* - $fn_name { - $( $current_params )* - }, - } - {} - $( - variant $next_fn_name; - $( $rest )* - )? 
- } - }; - - (@create_call_enum - $call_type:ident; - <$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path $(= $module_default_instance:path)?)?> - { $( $other_where_bounds:tt )* } - { $( $generated_variants:tt )* } - {} - ) => { - /// Dispatchable calls. - /// - /// Each variant of this enum maps to a dispatchable function from the associated module. - #[derive($crate::codec::Encode, $crate::codec::Decode, $crate::scale_info::TypeInfo)] - #[scale_info(skip_type_params($trait_instance $(, $instance)?), capture_docs = "always")] - pub enum $call_type<$trait_instance: $trait_name$(, $instance: $instantiable $( = $module_default_instance)?)?> - where $( $other_where_bounds )* - { - #[doc(hidden)] - #[codec(skip)] - __PhantomItem($crate::sp_std::marker::PhantomData<($trait_instance, $($instance)?)>, $crate::Never), - $( $generated_variants )* - } - }; - - // Implementation for `GetStorageVersion`. - (@impl_get_storage_version - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - $( $storage_version:tt )+ - ) => { - // Implement `GetStorageVersion` for `Pallet` - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::GetStorageVersion - for $module<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - type CurrentStorageVersion = $crate::traits::StorageVersion; - - fn current_storage_version() -> Self::CurrentStorageVersion { - $( $storage_version )* - } - - fn on_chain_storage_version() -> $crate::traits::StorageVersion { - $crate::traits::StorageVersion::get::() - } - } - - // Implement `OnGenesis` for `Module` - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::OnGenesis - for $module<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - fn on_genesis() { - let storage_version = ::current_storage_version(); - storage_version.put::(); - } - } - }; - - // 
Implementation for `GetStorageVersion` when no storage version is passed. - (@impl_get_storage_version - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - ) => { - // Implement `GetStorageVersion` for `Pallet` - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::GetStorageVersion - for $module<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - type CurrentStorageVersion = $crate::traits::NoStorageVersionSet; - - fn current_storage_version() -> Self::CurrentStorageVersion { - Default::default() - } - - fn on_chain_storage_version() -> $crate::traits::StorageVersion { - $crate::traits::StorageVersion::get::() - } - } - - // Implement `OnGenesis` for `Module` - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::OnGenesis - for $module<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - fn on_genesis() { - let storage_version = $crate::traits::StorageVersion::default(); - storage_version.put::(); - } - } - }; - - // The main macro expansion that actually renders the module code. - - (@imp - $(#[$attr:meta])* - pub struct $mod_type:ident< - $trait_instance:ident: $trait_name:ident - $(, $instance:ident: $instantiable:path $(= $module_default_instance:path)?)? - > - for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident { - $( - $(#[doc = $doc_attr:tt])* - #[weight = $weight:expr] - $(#[$fn_attr:meta])* - $fn_vis:vis fn $fn_name:ident( - $from:ident $( , $(#[$codec_attr:ident])* $param_name:ident : $param:ty)* - ) $( -> $result:ty )* { $( $impl:tt )* } - { $($fn_instance:ident: $fn_instantiable:path)? 
} - )* - } - { $( $other_where_bounds:tt )* } - { $( $deposit_event:tt )* } - { $( $on_initialize:tt )* } - { $( $on_runtime_upgrade:tt )* } - { $( $on_idle:tt )* } - { $( $on_finalize:tt )* } - { $( $offchain:tt )* } - { $( $constants:tt )* } - { $( $error_type:tt )* } - { $( $integrity_test:tt )* } - { $( $storage_version:tt )* } - ) => { - $crate::__check_reserved_fn_name! { $( $fn_name )* } - - // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. - #[derive(Clone, Copy, PartialEq, Eq, $crate::RuntimeDebug)] - $( #[$attr] )* - pub struct $mod_type< - $trait_instance: $trait_name - $(, $instance: $instantiable $( = $module_default_instance)?)? - >($crate::sp_std::marker::PhantomData<($trait_instance, $( $instance)?)>) where - $( $other_where_bounds )*; - - /// Type alias to `Module`, to be used by `construct_runtime`. - #[allow(dead_code)] - pub type Pallet<$trait_instance $(, $instance $( = $module_default_instance)?)?> - = $mod_type<$trait_instance $(, $instance)?>; - - $crate::__create_tt_macro! { - tt_error_token, - } - - $crate::decl_module! { - @impl_on_initialize - { $system } - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; - { $( $other_where_bounds )* } - $( $on_initialize )* - } - - $crate::decl_module! { - @impl_try_state_default - { $system } - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; - { $( $other_where_bounds )* } - } - - $crate::decl_module! { - @impl_on_runtime_upgrade - { $system } - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; - { $( $other_where_bounds )* } - $( $on_runtime_upgrade )* - } - - $crate::decl_module! { - @impl_on_finalize - { $system } - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; - { $( $other_where_bounds )* } - $( $on_finalize )* - } - - $crate::decl_module! 
{ - @impl_on_idle - { $system } - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; - { $( $other_where_bounds )* } - $( $on_idle )* - } - - $crate::decl_module! { - @impl_offchain - { $system } - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; - { $( $other_where_bounds )* } - $( $offchain )* - } - - $crate::decl_module! { - @impl_deposit_event - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; - $system; - { $( $other_where_bounds )* } - $( $deposit_event )* - } - - $crate::decl_module! { - @impl_integrity_test - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; - { $( $other_where_bounds )* } - $( $integrity_test )* - } - - /// Can also be called using [`Call`]. - /// - /// [`Call`]: enum.Call.html - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $mod_type<$trait_instance $(, $instance)?> - where $( $other_where_bounds )* - { - $( - $crate::decl_module! { - @impl_function - $mod_type<$trait_instance: $trait_name $(, $fn_instance: $fn_instantiable)?>; - $origin_type; - $from; - $(#[doc = $doc_attr])* - /// - /// NOTE: Calling this function will bypass origin filters. - $(#[$fn_attr])* - $fn_vis fn $fn_name ( - $from $(, $param_name : $param )* - ) $( -> $result )* { $( $impl )* } - } - )* - } - - $crate::decl_module! { - @create_call_enum - $call_type; - <$trait_instance: $trait_name $(, $instance: $instantiable $(= $module_default_instance)? )?> - { $( $other_where_bounds )* } - {} - {} - $( - variant $fn_name; - $(#[doc = $doc_attr])* - $( - $(#[$codec_attr])* - $param_name : $param; - )* - )* - } - - $crate::paste::paste! 
{ - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> - $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - $( - #[doc = "Create a call with the variant `" $fn_name "`."] - pub fn [< new_call_variant_ $fn_name >]( - $( $param_name: $param ),* - ) -> Self { - Self::$fn_name { - $( $param_name ),* - } - } - )* - } - } - - $crate::decl_module! { - @impl_get_storage_version - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; - { $( $other_where_bounds )* } - $( $storage_version )* - } - - // Implement weight calculation function for Call - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::GetDispatchInfo - for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - fn get_dispatch_info(&self) -> $crate::dispatch::DispatchInfo { - match *self { - $( - $call_type::$fn_name { $( ref $param_name ),* } => { - let __pallet_base_weight = $weight; - let __pallet_weight = >::weigh_data( - &__pallet_base_weight, - ($( $param_name, )*) - ); - let __pallet_class = >::classify_dispatch( - &__pallet_base_weight, - ($( $param_name, )*) - ); - let __pallet_pays_fee = >::pays_fee( - &__pallet_base_weight, - ($( $param_name, )*) - ); - $crate::dispatch::DispatchInfo { - weight: __pallet_weight, - class: __pallet_class, - pays_fee: __pallet_pays_fee, - } - }, - )* - $call_type::__PhantomItem(_, _) => unreachable!("__PhantomItem should never be used."), - } - } - } - - // Implement PalletInfoAccess for the module. 
- impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::PalletInfoAccess - for $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - fn index() -> usize { - < - <$trait_instance as $system::Config>::PalletInfo as $crate::traits::PalletInfo - >::index::() - .expect("Pallet is part of the runtime because pallet `Config` trait is \ - implemented by the runtime") - } - - fn name() -> &'static str { - < - <$trait_instance as $system::Config>::PalletInfo as $crate::traits::PalletInfo - >::name::() - .expect("Pallet is part of the runtime because pallet `Config` trait is \ - implemented by the runtime") - } - - fn module_name() -> &'static str { - < - <$trait_instance as $system::Config>::PalletInfo as $crate::traits::PalletInfo - >::module_name::() - .expect("Pallet is part of the runtime because pallet `Config` trait is \ - implemented by the runtime") - } - - fn crate_version() -> $crate::traits::CrateVersion { - $crate::crate_to_crate_version!() - } - } - - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::PalletsInfoAccess - for $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - fn count() -> usize { 1 } - fn infos() -> $crate::sp_std::vec::Vec<$crate::traits::PalletInfoData> { - use $crate::traits::PalletInfoAccess; - let item = $crate::traits::PalletInfoData { - index: Self::index(), - name: Self::name(), - module_name: Self::module_name(), - crate_version: Self::crate_version(), - }; - vec![item] - } - } - - // Implement GetCallName for the Call. 
- impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::GetCallName - for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - fn get_call_name(&self) -> &'static str { - match *self { - $( - $call_type::$fn_name { $( ref $param_name ),* } => { - // Don't generate any warnings for unused variables - let _ = ( $( $param_name ),* ); - stringify!($fn_name) - }, - )* - $call_type::__PhantomItem(_, _) => unreachable!("__PhantomItem should never be used."), - } - } - - fn get_call_names() -> &'static [&'static str] { - &[ - $( - stringify!($fn_name), - )* - ] - } - } - - // manual implementation of clone/eq/partialeq because using derive erroneously requires - // clone/eq/partialeq from T. - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::Clone - for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - fn clone(&self) -> Self { - match *self { - $( - $call_type::$fn_name { $( ref $param_name ),* } => - $call_type::$fn_name { $( $param_name: (*$param_name).clone() ),* } - ,)* - _ => unreachable!(), - } - } - } - - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::PartialEq - for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - fn eq(&self, _other: &Self) -> bool { - match *self { - $( - $call_type::$fn_name { $( ref $param_name ),* } => { - let self_params = ( $( $param_name, )* ); - if let $call_type::$fn_name { $( ref $param_name ),* } = *_other { - self_params == ( $( $param_name, )* ) - } else { - match *_other { - $call_type::__PhantomItem(_, _) => unreachable!(), - _ => false, - } - } - } - )* - _ => unreachable!(), - } - } - } - - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::Eq - for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - {} - - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> 
$crate::dispatch::fmt::Debug - for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - fn fmt( - &self, - _f: &mut $crate::dispatch::fmt::Formatter, - ) -> $crate::dispatch::result::Result<(), $crate::dispatch::fmt::Error> { - match *self { - $( - $call_type::$fn_name { $( ref $param_name ),* } => - write!(_f, "{}{:?}", - stringify!($fn_name), - ( $( $param_name.clone(), )* ) - ) - ,)* - _ => unreachable!(), - } - } - } - - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::UnfilteredDispatchable - for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - type RuntimeOrigin = $origin_type; - fn dispatch_bypass_filter(self, _origin: Self::RuntimeOrigin) -> $crate::dispatch::DispatchResultWithPostInfo { - match self { - $( - $call_type::$fn_name { $( $param_name ),* } => { - $crate::decl_module!( - @call - $from - $mod_type<$trait_instance $(, $fn_instance)?> $fn_name _origin $system [ $( $param_name ),* ] - ) - }, - )* - $call_type::__PhantomItem(_, _) => { unreachable!("__PhantomItem should never be used.") }, - } - } - } - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::Callable<$trait_instance> - for $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - type RuntimeCall = $call_type<$trait_instance $(, $instance)?>; - } - - $crate::__dispatch_impl_metadata! { - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?> - { $( $other_where_bounds )* } - $call_type $origin_type - { - $( - $(#[doc = $doc_attr])* - fn $fn_name($from $(, $(#[$codec_attr])* $param_name : $param )*); - )* - } - } - $crate::__impl_error_metadata! { - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?> - { $( $other_where_bounds )* } - $( $error_type )* - } - $crate::__impl_docs_metadata! 
{ - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?> - { $( $other_where_bounds )* } - } - $crate::__impl_module_constants_metadata ! { - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?> - { $( $other_where_bounds )* } - $( $constants )* - } - - $crate::__generate_dummy_part_checker!(); - } -} - -/// Implement metadata for dispatch. -#[macro_export] -#[doc(hidden)] -macro_rules! __dispatch_impl_metadata { - ( - $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?> - { $( $other_where_bounds:tt )* } - $call_type:ident - $($rest:tt)* - ) => { - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $mod_type<$trait_instance $(, $instance)?> - where $( $other_where_bounds )* - { - #[doc(hidden)] - #[allow(dead_code)] - pub fn call_functions() -> $crate::metadata_ir::PalletCallMetadataIR { - $crate::scale_info::meta_type::<$call_type<$trait_instance $(, $instance)?>>().into() - } - } - } -} - -/// Implement metadata for pallet error. -#[macro_export] -#[doc(hidden)] -macro_rules! 
__impl_error_metadata { - ( - $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?> - { $( $other_where_bounds:tt )* } - __NO_ERROR_DEFINED - ) => { - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $mod_type<$trait_instance $(, $instance)?> - where $( $other_where_bounds )* - { - #[doc(hidden)] - #[allow(dead_code)] - pub fn error_metadata() -> Option<$crate::metadata_ir::PalletErrorMetadataIR> { - None - } - } - }; - ( - $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?> - { $( $other_where_bounds:tt )* } - $( $error_type:tt )* - ) => { - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $mod_type<$trait_instance $(, $instance)?> - where $( $other_where_bounds )* - { - #[doc(hidden)] - #[allow(dead_code)] - pub fn error_metadata() -> Option<$crate::metadata_ir::PalletErrorMetadataIR> { - Some($crate::metadata_ir::PalletErrorMetadataIR { - ty: $crate::scale_info::meta_type::<$( $error_type )*>() - }) - } - } - }; -} - -/// Implement metadata for pallet documentation. -#[macro_export] -#[doc(hidden)] -macro_rules! __impl_docs_metadata { - ( - $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?> - { $( $other_where_bounds:tt )* } - ) => { - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $mod_type<$trait_instance $(, $instance)?> - where $( $other_where_bounds )* - { - #[doc(hidden)] - #[allow(dead_code)] - pub fn pallet_documentation_metadata() -> $crate::sp_std::vec::Vec<&'static str> { - $crate::sp_std::vec![] - } - } - }; -} - -/// Implement metadata for module constants. -#[macro_export] -#[doc(hidden)] -macro_rules! 
__impl_module_constants_metadata { - // Without instance - ( - $mod_type:ident<$trait_instance:ident: $trait_name:ident> - { $( $other_where_bounds:tt )* } - $( - $( #[doc = $doc_attr:tt] )* - $name:ident: $type:ty = $value:expr; - )* - ) => { - $crate::paste::item! { - $crate::__impl_module_constants_metadata! { - GENERATE_CODE - $mod_type<$trait_instance: $trait_name> - { $( $other_where_bounds )* } - $( - $( #[doc = $doc_attr] )* - [< $name DefaultByteGetter >] - $name<$trait_instance: $trait_name>: $type = $value; - )* - } - } - }; - // With instance - ( - $mod_type:ident<$trait_instance:ident: $trait_name:ident, $instance:ident: $instantiable:path> - { $( $other_where_bounds:tt )* } - $( - $( #[doc = $doc_attr:tt] )* - $name:ident: $type:ty = $value:expr; - )* - ) => { - $crate::paste::item! { - $crate::__impl_module_constants_metadata! { - GENERATE_CODE - $mod_type<$trait_instance: $trait_name, $instance: $instantiable> - { $( $other_where_bounds )* } - $( - $( #[doc = $doc_attr] )* - [< $name DefaultByteGetter >] - $name<$trait_instance: $trait_name, $instance: $instantiable>: $type = $value; - )* - } - } - }; - // Do the code generation - (GENERATE_CODE - $mod_type:ident<$trait_instance:ident: $trait_name:ident $(, $instance:ident: $instantiable:path)?> - { $( $other_where_bounds:tt )* } - $( - $( #[doc = $doc_attr:tt] )* - $default_byte_name:ident - $name:ident< - $const_trait_instance:ident: $const_trait_name:ident $( - , $const_instance:ident: $const_instantiable:path - )* - >: $type:ty = $value:expr; - )* - ) => { - impl<$trait_instance: 'static + $trait_name $(, $instance: $instantiable)?> - $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - #[doc(hidden)] - #[allow(dead_code)] - pub fn pallet_constants_metadata() -> $crate::sp_std::vec::Vec<$crate::metadata_ir::PalletConstantMetadataIR> { - // Create the `ByteGetter`s - $( - #[allow(non_upper_case_types)] - #[allow(non_camel_case_types)] - struct $default_byte_name< - 
$const_trait_instance: $const_trait_name $( - , $const_instance: $const_instantiable - )? - >($crate::dispatch::marker::PhantomData< - ($const_trait_instance, $( $const_instance)?) - >); - impl<$const_trait_instance: 'static + $const_trait_name $( - , $const_instance: $const_instantiable)? - > $default_byte_name <$const_trait_instance $(, $const_instance)?> - { - fn default_byte(&self) -> $crate::dispatch::Vec { - let value: $type = $value; - $crate::dispatch::Encode::encode(&value) - } - } - )* - $crate::sp_std::vec![ - $( - $crate::metadata_ir::PalletConstantMetadataIR { - name: stringify!($name), - ty: $crate::scale_info::meta_type::<$type>(), - value: $default_byte_name::<$const_trait_instance $(, $const_instance)?>( - Default::default() - ).default_byte(), - docs: $crate::sp_std::vec![ $( $doc_attr ),* ], - } - ),* - ] - } - } - } -} - -#[macro_export] -#[doc(hidden)] -macro_rules! __check_reserved_fn_name { - (deposit_event $( $rest:ident )*) => { - $crate::__check_reserved_fn_name!(@compile_error deposit_event); - }; - (on_initialize $( $rest:ident )*) => { - $crate::__check_reserved_fn_name!(@compile_error on_initialize); - }; - (on_runtime_upgrade $( $rest:ident )*) => { - $crate::__check_reserved_fn_name!(@compile_error on_runtime_upgrade); - }; - (on_idle $( $rest:ident )*) => { - $crate::__check_reserved_fn_name!(@compile_error on_idle); - }; - (on_finalize $( $rest:ident )*) => { - $crate::__check_reserved_fn_name!(@compile_error on_finalize); - }; - (offchain_worker $( $rest:ident )*) => { - $crate::__check_reserved_fn_name!(@compile_error offchain_worker); - }; - (integrity_test $( $rest:ident )*) => { - $crate::__check_reserved_fn_name!(@compile_error integrity_test); - }; - ($t:ident $( $rest:ident )*) => { - $crate::__check_reserved_fn_name!($( $rest )*); - }; - () => {}; - (@compile_error $ident:ident) => { - compile_error!( - concat!( - "Invalid call fn name: `", - stringify!($ident), - "`, name is reserved and doesn't match expected signature, 
please refer to ", - "`decl_module!` documentation to see the appropriate usage, or rename it to an ", - "unreserved keyword." - ), - ); - }; - (@compile_error_renamed $ident:ident $new_ident:ident) => { - compile_error!( - concat!( - "`", - stringify!($ident), - "` was renamed to `", - stringify!($new_ident), - "`. Please rename your function accordingly.", - ), - ); - }; -} - #[cfg(test)] // Do not complain about unused `dispatch` and `dispatch_aux`. #[allow(dead_code)] -#[allow(deprecated)] -mod tests { +mod weight_tests { use super::*; - use crate::{ - dispatch::{DispatchClass, DispatchInfo, Pays}, - metadata_ir::*, - traits::{ - CallerTrait, CrateVersion, Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, - OnInitialize, OnRuntimeUpgrade, PalletInfo, - }, - }; - use sp_weights::{RuntimeDbWeight, Weight}; - - pub trait Config: system::Config + Sized - where - Self::AccountId: From, - { - } - - pub mod system { - use super::*; - - pub trait Config: 'static { - type AccountId; - type RuntimeCall; - type BaseCallFilter; - type RuntimeOrigin: crate::traits::OriginTrait; - type BlockNumber: Into; - type PalletInfo: crate::traits::PalletInfo; - type DbWeight: Get; - } - - pub use super::super::RawOrigin; - - pub type Origin = RawOrigin<::AccountId>; - } - - decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system = system, T::AccountId: From { - /// Hi, this is a comment. 
- #[weight = 0] - fn aux_0(_origin) -> DispatchResult { unreachable!() } - - #[weight = 0] - fn aux_1(_origin, #[compact] _data: u32,) -> DispatchResult { unreachable!() } - - #[weight = 0] - fn aux_2(_origin, _data: i32, _data2: String) -> DispatchResult { unreachable!() } - - #[weight = 3] - fn aux_3(_origin) -> DispatchResult { unreachable!() } - - #[weight = 0] - fn aux_4(_origin, _data: i32) -> DispatchResult { unreachable!() } - - #[weight = 0] - fn aux_5(_origin, _data: i32, #[compact] _data2: u32,) -> DispatchResult { unreachable!() } - - #[weight = (5, DispatchClass::Operational)] - fn operational(_origin) { unreachable!() } - - fn on_initialize(n: T::BlockNumber,) -> Weight { if n.into() == 42 { panic!("on_initialize") } Weight::from_parts(7, 0) } - fn on_idle(n: T::BlockNumber, remaining_weight: Weight,) -> Weight { - if n.into() == 42 || remaining_weight == Weight::from_parts(42, 0) { panic!("on_idle") } - Weight::from_parts(7, 0) - } - fn on_finalize(n: T::BlockNumber,) { if n.into() == 42 { panic!("on_finalize") } } - fn on_runtime_upgrade() -> Weight { Weight::from_parts(10, 0) } - fn offchain_worker() {} - /// Some doc - fn integrity_test() { panic!("integrity_test") } - } - } - - #[derive(Eq, PartialEq, Clone, crate::RuntimeDebug, scale_info::TypeInfo)] - pub struct TraitImpl {} - impl Config for TraitImpl {} - - type Test = Module; - - impl PalletInfo for TraitImpl { - fn index() -> Option { - let type_id = sp_std::any::TypeId::of::

(); - if type_id == sp_std::any::TypeId::of::() { - return Some(0) - } - - None - } - fn name() -> Option<&'static str> { - let type_id = sp_std::any::TypeId::of::

(); - if type_id == sp_std::any::TypeId::of::() { - return Some("Test") - } - - None - } - fn module_name() -> Option<&'static str> { - let type_id = sp_std::any::TypeId::of::

(); - if type_id == sp_std::any::TypeId::of::() { - return Some("tests") - } - - None - } - fn crate_version() -> Option { - let type_id = sp_std::any::TypeId::of::

(); - if type_id == sp_std::any::TypeId::of::() { - return Some(frame_support::crate_to_crate_version!()) - } - - None - } - } - - #[derive( - TypeInfo, crate::RuntimeDebug, Eq, PartialEq, Clone, Encode, Decode, MaxEncodedLen, - )] - pub struct OuterOrigin; - - impl From::AccountId>> for OuterOrigin { - fn from(_: RawOrigin<::AccountId>) -> Self { - unimplemented!("Not required in tests!") - } - } - - impl CallerTrait<::AccountId> for OuterOrigin { - fn into_system(self) -> Option::AccountId>> { - unimplemented!("Not required in tests!") - } - - fn as_system_ref(&self) -> Option<&RawOrigin<::AccountId>> { - unimplemented!("Not required in tests!") - } - } - - impl crate::traits::OriginTrait for OuterOrigin { - type Call = ::RuntimeCall; - type PalletsOrigin = OuterOrigin; - type AccountId = ::AccountId; - - fn add_filter(&mut self, _filter: impl Fn(&Self::Call) -> bool + 'static) { - unimplemented!("Not required in tests!") - } - - fn reset_filter(&mut self) { - unimplemented!("Not required in tests!") - } - - fn set_caller_from(&mut self, _other: impl Into) { - unimplemented!("Not required in tests!") - } - - fn filter_call(&self, _call: &Self::Call) -> bool { - unimplemented!("Not required in tests!") - } - - fn caller(&self) -> &Self::PalletsOrigin { - unimplemented!("Not required in tests!") - } - - fn into_caller(self) -> Self::PalletsOrigin { - unimplemented!("Not required in tests!") - } - - fn try_with_caller( - self, - _f: impl FnOnce(Self::PalletsOrigin) -> Result, - ) -> Result { - unimplemented!("Not required in tests!") - } - - fn none() -> Self { - unimplemented!("Not required in tests!") - } - fn root() -> Self { - unimplemented!("Not required in tests!") - } - fn signed(_by: ::AccountId) -> Self { - unimplemented!("Not required in tests!") - } - fn as_signed(self) -> Option { - unimplemented!("Not required in tests!") - } - fn as_system_ref(&self) -> Option<&RawOrigin> { - unimplemented!("Not required in tests!") - } - } - - impl system::Config for 
TraitImpl { - type RuntimeOrigin = OuterOrigin; - type AccountId = u32; - type RuntimeCall = (); - type BaseCallFilter = frame_support::traits::Everything; - type BlockNumber = u32; - type PalletInfo = Self; - type DbWeight = (); - } - - #[test] - fn module_json_metadata() { - let metadata = Module::::call_functions(); - let expected_metadata = - PalletCallMetadataIR { ty: scale_info::meta_type::>() }; - assert_eq!(expected_metadata, metadata); - } - - #[test] - fn compact_attr() { - let call: Call = Call::aux_1 { _data: 1 }; - let encoded = call.encode(); - assert_eq!(2, encoded.len()); - assert_eq!(vec![1, 4], encoded); - - let call: Call = Call::aux_5 { _data: 1, _data2: 2 }; - let encoded = call.encode(); - assert_eq!(6, encoded.len()); - assert_eq!(vec![5, 1, 0, 0, 0, 8], encoded); - } - - #[test] - fn encode_is_correct_and_decode_works() { - let call: Call = Call::aux_0 {}; - let encoded = call.encode(); - assert_eq!(vec![0], encoded); - let decoded = Call::::decode(&mut &encoded[..]).unwrap(); - assert_eq!(decoded, call); - - let call: Call = Call::aux_2 { _data: 32, _data2: "hello".into() }; - let encoded = call.encode(); - assert_eq!(vec![2, 32, 0, 0, 0, 20, 104, 101, 108, 108, 111], encoded); - let decoded = Call::::decode(&mut &encoded[..]).unwrap(); - assert_eq!(decoded, call); - } - - #[test] - #[should_panic(expected = "on_initialize")] - fn on_initialize_should_work_1() { - as OnInitialize>::on_initialize(42); - } - - #[test] - fn on_initialize_should_work_2() { - assert_eq!( - as OnInitialize>::on_initialize(10), - Weight::from_parts(7, 0) - ); - } - - #[test] - #[should_panic(expected = "on_idle")] - fn on_idle_should_work_1() { - as OnIdle>::on_idle(42, Weight::from_parts(9, 0)); - } - - #[test] - #[should_panic(expected = "on_idle")] - fn on_idle_should_work_2() { - as OnIdle>::on_idle(9, Weight::from_parts(42, 0)); - } - - #[test] - fn on_idle_should_work_3() { - assert_eq!( - as OnIdle>::on_idle(10, Weight::from_parts(11, 0)), - 
Weight::from_parts(7, 0) - ); - } - - #[test] - #[should_panic(expected = "on_finalize")] - fn on_finalize_should_work() { - as OnFinalize>::on_finalize(42); - } - - #[test] - fn on_runtime_upgrade_should_work() { - sp_io::TestExternalities::default().execute_with(|| { - assert_eq!( - as OnRuntimeUpgrade>::on_runtime_upgrade(), - Weight::from_parts(10, 0) - ) - }); - } - - #[test] - fn weight_should_attach_to_call_enum() { - // operational. - assert_eq!( - Call::::operational {}.get_dispatch_info(), - DispatchInfo { - weight: Weight::from_parts(5, 0), - class: DispatchClass::Operational, - pays_fee: Pays::Yes - }, - ); - // custom basic - assert_eq!( - Call::::aux_3 {}.get_dispatch_info(), - DispatchInfo { - weight: Weight::from_parts(3, 0), - class: DispatchClass::Normal, - pays_fee: Pays::Yes - }, - ); - } - - #[test] - fn call_name() { - let name = Call::::aux_3 {}.get_call_name(); - assert_eq!("aux_3", name); - } - - #[test] - fn get_call_names() { - let call_names = Call::::get_call_names(); - assert_eq!( - ["aux_0", "aux_1", "aux_2", "aux_3", "aux_4", "aux_5", "operational"], - call_names - ); - } - - #[test] - #[should_panic(expected = "integrity_test")] - fn integrity_test_should_work() { - as IntegrityTest>::integrity_test(); - } + use sp_core::parameter_types; + use sp_runtime::{generic, traits::BlakeTwo256}; + use sp_weights::RuntimeDbWeight; - #[test] - fn test_new_call_variant() { - Call::::new_call_variant_aux_0(); - } + pub use self::frame_system::{Call, Config, Pallet}; - pub fn from_actual_ref_time(ref_time: Option) -> PostDispatchInfo { + fn from_actual_ref_time(ref_time: Option) -> PostDispatchInfo { PostDispatchInfo { actual_weight: ref_time.map(|t| Weight::from_all(t)), pays_fee: Default::default(), } } - pub fn from_post_weight_info(ref_time: Option, pays_fee: Pays) -> PostDispatchInfo { + fn from_post_weight_info(ref_time: Option, pays_fee: Pays) -> PostDispatchInfo { PostDispatchInfo { actual_weight: ref_time.map(|t| Weight::from_all(t)), 
pays_fee } } -} - -#[cfg(test)] -// Do not complain about unused `dispatch` and `dispatch_aux`. -#[allow(dead_code)] -mod weight_tests { - use super::{tests::*, *}; - use sp_core::parameter_types; - use sp_runtime::{generic, traits::BlakeTwo256}; - use sp_weights::RuntimeDbWeight; - - pub use self::frame_system::{Call, Config, Pallet}; #[crate::pallet(dev_mode)] pub mod frame_system { @@ -3562,12 +673,12 @@ mod weight_tests { use crate::pallet_prelude::*; #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::config] #[pallet::disable_frame_system_supertrait_check] pub trait Config: 'static { - type BlockNumber: Parameter + Default + MaxEncodedLen; + type Block: Parameter + sp_runtime::traits::Block; type AccountId; type Balance; type BaseCallFilter: crate::traits::Contains; @@ -3633,6 +744,11 @@ mod weight_tests { pub mod pallet_prelude { pub type OriginFor = ::RuntimeOrigin; + + pub type HeaderFor = + <::Block as sp_runtime::traits::HeaderProvider>::HeaderT; + + pub type BlockNumberFor = as sp_runtime::traits::Header>::Number; } } @@ -3645,10 +761,6 @@ mod weight_tests { crate::construct_runtime!( pub enum Runtime - where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, { System: self::frame_system, } @@ -3662,7 +774,7 @@ mod weight_tests { } impl Config for Runtime { - type BlockNumber = BlockNumber; + type Block = Block; type AccountId = AccountId; type Balance = Balance; type BaseCallFilter = crate::traits::Everything; diff --git a/frame/support/src/error.rs b/frame/support/src/error.rs deleted file mode 100644 index fa711389abe18..0000000000000 --- a/frame/support/src/error.rs +++ /dev/null @@ -1,163 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Macro for declaring a module error. - -#[doc(hidden)] -pub use sp_runtime::traits::{BadOrigin, LookupError}; - -/// Declare an error type for a runtime module. -/// -/// `decl_error!` supports only variants that do not hold any data. The dispatchable -/// functions return [`DispatchResult`](sp_runtime::DispatchResult). The error type -/// implements `From for DispatchResult` to make the error type usable as error -/// in the dispatchable functions. -/// -/// It is required that the error type is registered in `decl_module!` to make the error -/// exported in the metadata. -/// -/// # Usage -/// -/// ``` -/// # use frame_support::{decl_error, decl_module}; -/// # -/// decl_error! { -/// /// Errors that can occur in my module. -/// pub enum MyError for Module { -/// /// Hey this is an error message that indicates bla. -/// MyCoolErrorMessage, -/// /// You are just not cool enough for my module! -/// YouAreNotCoolEnough, -/// } -/// } -/// -/// # use frame_system::Config; -/// -/// // You need to register the error type in `decl_module!` as well to make the error -/// // exported in the metadata. -/// -/// decl_module! { -/// pub struct Module for enum Call where origin: T::RuntimeOrigin { -/// type Error = MyError; -/// -/// #[weight = 0] -/// fn do_something(origin) -> frame_support::dispatch::DispatchResult { -/// Err(MyError::::YouAreNotCoolEnough.into()) -/// } -/// } -/// } -/// -/// # fn main() {} -/// ``` -/// -/// For instantiable modules you also need to give the instance generic type and bound to the -/// error declaration. 
-#[macro_export] -#[deprecated(note = "Will be removed soon; use the attribute `#[pallet]` macro instead. - For more info, see: ")] -macro_rules! decl_error { - ( - $(#[$attr:meta])* - pub enum $error:ident - for $module:ident< - $generic:ident: $trait:path - $(, $inst_generic:ident: $instance:path)? - > - $( where $( $where_ty:ty: $where_bound:path ),* $(,)? )? - { - $( - $( #[doc = $doc_attr:tt] )* - $name:ident - ),* - $(,)? - } - ) => { - $(#[$attr])* - #[derive( - $crate::codec::Encode, - $crate::codec::Decode, - $crate::scale_info::TypeInfo, - $crate::PalletError, - )] - #[scale_info(skip_type_params($generic $(, $inst_generic)?), capture_docs = "always")] - pub enum $error<$generic: $trait $(, $inst_generic: $instance)?> - $( where $( $where_ty: $where_bound ),* )? - { - #[doc(hidden)] - #[codec(skip)] - __Ignore( - $crate::sp_std::marker::PhantomData<($generic, $( $inst_generic)?)>, - $crate::Never, - ), - $( - $( #[doc = $doc_attr] )* - $name - ),* - } - - impl<$generic: $trait $(, $inst_generic: $instance)?> $crate::sp_std::fmt::Debug - for $error<$generic $(, $inst_generic)?> - $( where $( $where_ty: $where_bound ),* )? - { - fn fmt(&self, f: &mut $crate::sp_std::fmt::Formatter<'_>) -> $crate::sp_std::fmt::Result { - f.write_str(self.as_str()) - } - } - - impl<$generic: $trait $(, $inst_generic: $instance)?> $error<$generic $(, $inst_generic)?> - $( where $( $where_ty: $where_bound ),* )? - { - fn as_str(&self) -> &'static str { - match self { - Self::__Ignore(_, _) => unreachable!("`__Ignore` can never be constructed"), - $( - $error::$name => stringify!($name), - )* - } - } - } - - impl<$generic: $trait $(, $inst_generic: $instance)?> From<$error<$generic $(, $inst_generic)?>> - for &'static str - $( where $( $where_ty: $where_bound ),* )? 
- { - fn from(err: $error<$generic $(, $inst_generic)?>) -> &'static str { - err.as_str() - } - } - - impl<$generic: $trait $(, $inst_generic: $instance)?> From<$error<$generic $(, $inst_generic)?>> - for $crate::sp_runtime::DispatchError - $( where $( $where_ty: $where_bound ),* )? - { - fn from(err: $error<$generic $(, $inst_generic)?>) -> Self { - use $crate::codec::Encode; - let index = <$generic::PalletInfo as $crate::traits::PalletInfo> - ::index::<$module<$generic $(, $inst_generic)?>>() - .expect("Every active module has an index in the runtime; qed") as u8; - let mut error = err.encode(); - error.resize($crate::MAX_MODULE_ERROR_ENCODED_SIZE, 0); - - $crate::sp_runtime::DispatchError::Module($crate::sp_runtime::ModuleError { - index, - error: TryInto::try_into(error).expect("encoded error is resized to be equal to the maximum encoded error size; qed"), - message: Some(err.as_str()), - }) - } - } - }; -} diff --git a/frame/support/src/event.rs b/frame/support/src/event.rs deleted file mode 100644 index ba3e5a2750b63..0000000000000 --- a/frame/support/src/event.rs +++ /dev/null @@ -1,294 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Macros that define an Event types. Events can be used to easily report changes or conditions -//! in your runtime to external entities like users, chain explorers, or dApps. 
- -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -/// Implement the `Event` for a module. -/// -/// # Simple Event Example: -/// -/// ```rust -/// frame_support::decl_event!( -/// pub enum Event { -/// Success, -/// Failure(String), -/// } -/// ); -/// -/// # fn main() {} -/// ``` -/// -/// # Generic Event Example: -/// -/// ```rust -/// trait Config { -/// type Balance; -/// type Token; -/// } -/// -/// mod event1 { -/// // Event that specifies the generic parameter explicitly (`Balance`). -/// frame_support::decl_event!( -/// pub enum Event where Balance = ::Balance { -/// Message(Balance), -/// } -/// ); -/// } -/// -/// mod event2 { -/// // Event that uses the generic parameter `Balance`. -/// // If no name for the generic parameter is specified explicitly, -/// // the name will be taken from the type name of the trait. -/// frame_support::decl_event!( -/// pub enum Event where ::Balance { -/// Message(Balance), -/// } -/// ); -/// } -/// -/// mod event3 { -/// // And we even support declaring multiple generic parameters! -/// frame_support::decl_event!( -/// pub enum Event where ::Balance, ::Token { -/// Message(Balance, Token), -/// } -/// ); -/// } -/// -/// # fn main() {} -/// ``` -/// -/// The syntax for generic events requires the `where`. -/// -/// # Generic Event with Instance Example: -/// -/// ```rust -/// # struct DefaultInstance; -/// # trait Instance {} -/// # impl Instance for DefaultInstance {} -/// trait Config { -/// type Balance; -/// type Token; -/// } -/// -/// // For module with instances, DefaultInstance is optional -/// frame_support::decl_event!( -/// pub enum Event where -/// ::Balance, -/// ::Token -/// { -/// Message(Balance, Token), -/// } -/// ); -/// # fn main() {} -/// ``` -#[macro_export] -#[deprecated(note = "Will be removed soon; use the attribute `#[pallet]` macro instead. - For more info, see: ")] -macro_rules! 
decl_event { - ( - $(#[$attr:meta])* - pub enum Event<$evt_generic_param:ident $(, $instance:ident $(: $instantiable:ident)? $( = $event_default_instance:path)? )?> where - $( $tt:tt )* - ) => { - $crate::__decl_generic_event!( - $( #[ $attr ] )*; - $evt_generic_param; - $($instance $( = $event_default_instance)? )?; - { $( $tt )* }; - ); - }; - ( - $(#[$attr:meta])* - pub enum Event { - $( - $events:tt - )* - } - ) => { - // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. - #[derive( - Clone, PartialEq, Eq, - $crate::codec::Encode, - $crate::codec::Decode, - $crate::scale_info::TypeInfo, - $crate::RuntimeDebug, - )] - #[scale_info(capture_docs = "always")] - /// Events for this module. - /// - $(#[$attr])* - pub enum Event { - $( - $events - )* - } - impl From for () { - fn from(_: Event) -> () { () } - } - } -} - -#[macro_export] -#[doc(hidden)] -// This parsing to retrieve last ident on unnamed generic could be improved. -// but user can still name it if the parsing fails. And improving parsing seems difficult. -macro_rules! __decl_generic_event { - ( - $(#[$attr:meta])*; - $event_generic_param:ident; - $($instance:ident $( = $event_default_instance:path)? )?; - { $( $tt:tt )* }; - ) => { - $crate::__decl_generic_event!(@format_generic - $( #[ $attr ] )*; - $event_generic_param; - $($instance $( = $event_default_instance)? )?; - { $( $tt )* }; - {}; - ); - }; - // Finish formatting on an unnamed one - (@format_generic - $(#[$attr:meta])*; - $event_generic_param:ident; - $($instance:ident $( = $event_default_instance:path)? )?; - { <$generic:ident as $trait:path>::$trait_type:ident $(,)? { $( $events:tt )* } }; - {$( $parsed:tt)*}; - ) => { - $crate::__decl_generic_event!(@generate - $( #[ $attr ] )*; - $event_generic_param; - $($instance $( = $event_default_instance)? 
)?; - { $($events)* }; - { $($parsed)*, $trait_type = <$generic as $trait>::$trait_type }; - ); - }; - // Finish formatting on a named one - (@format_generic - $(#[$attr:meta])*; - $event_generic_param:ident; - $($instance:ident $( = $event_default_instance:path)? )?; - { $generic_rename:ident = $generic_type:ty $(,)? { $( $events:tt )* } }; - { $($parsed:tt)* }; - ) => { - $crate::__decl_generic_event!(@generate - $(#[$attr])*; - $event_generic_param; - $($instance $( = $event_default_instance)? )?; - { $($events)* }; - { $($parsed)*, $generic_rename = $generic_type }; - ); - }; - // Parse named - (@format_generic - $(#[$attr:meta])*; - $event_generic_param:ident; - $($instance:ident $( = $event_default_instance:path)? )?; - { $generic_rename:ident = $generic_type:ty, $($rest:tt)* }; - {$( $parsed:tt)*}; - ) => { - $crate::__decl_generic_event!(@format_generic - $( #[ $attr ] )*; - $event_generic_param; - $( $instance $( = $event_default_instance)? )?; - { $($rest)* }; - { $($parsed)*, $generic_rename = $generic_type }; - ); - }; - // Parse unnamed - (@format_generic - $(#[$attr:meta])*; - $event_generic_param:ident; - $($instance:ident $( = $event_default_instance:path)? )?; - { <$generic:ident as $trait:path>::$trait_type:ident, $($rest:tt)* }; - {$($parsed:tt)*}; - ) => { - $crate::__decl_generic_event!(@format_generic - $( #[ $attr ] )*; - $event_generic_param; - $($instance $( = $event_default_instance)? )?; - { $($rest)* }; - { $($parsed)*, $trait_type = <$generic as $trait>::$trait_type }; - ); - }; - // Unnamed type can't be parsed - (@format_generic - $(#[$attr:meta])*; - $event_generic_param:ident; - $($instance:ident $( = $event_default_instance:path)? )?; - { $generic_type:ty, $($rest:tt)* }; - { $($parsed:tt)* }; - ) => { - $crate::__decl_generic_event!(@cannot_parse $generic_type); - }; - // Final unnamed type can't be parsed - (@format_generic - $(#[$attr:meta])*; - $event_generic_param:ident; - $($instance:ident $( = $event_default_instance:path)? 
)?; - { $generic_type:ty { $( $events:tt )* } }; - {$( $parsed:tt)*}; - ) => { - $crate::__decl_generic_event!(@cannot_parse $generic_type); - }; - (@generate - $(#[$attr:meta])*; - $event_generic_param:ident; - $($instance:ident $( = $event_default_instance:path)? )?; - { $( $events:tt )* }; - { ,$( $generic_param:ident = $generic_type:ty ),* }; - ) => { - /// [`RawEvent`] specialized for the configuration [`Config`] - /// - /// [`RawEvent`]: enum.RawEvent.html - /// [`Config`]: trait.Config.html - pub type Event<$event_generic_param $(, $instance $( = $event_default_instance)? )?> = RawEvent<$( $generic_type ),* $(, $instance)? >; - - #[derive( - Clone, PartialEq, Eq, - $crate::codec::Encode, - $crate::codec::Decode, - $crate::scale_info::TypeInfo, - $crate::RuntimeDebug, - )] - #[scale_info(capture_docs = "always")] - /// Events for this module. - /// - $(#[$attr])* - pub enum RawEvent<$( $generic_param ),* $(, $instance)? > { - $( - $events - )* - $( - #[doc(hidden)] - #[codec(skip)] - PhantomData($crate::sp_std::marker::PhantomData<$instance>), - )? - } - impl<$( $generic_param ),* $(, $instance)? > From> for () { - fn from(_: RawEvent<$( $generic_param ),* $(, $instance)?>) -> () { () } - } - }; - (@cannot_parse $ty:ty) => { - compile_error!(concat!("The type `", stringify!($ty), "` can't be parsed as an unnamed one, please name it `Name = ", stringify!($ty), "`")); - } -} diff --git a/frame/support/src/genesis_builder_helper.rs b/frame/support/src/genesis_builder_helper.rs new file mode 100644 index 0000000000000..d4144a4d9fd19 --- /dev/null +++ b/frame/support/src/genesis_builder_helper.rs @@ -0,0 +1,41 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Helper functions for implementing [`sp_genesis_builder::GenesisBuilder`] for runtimes. +//! +//! Provides common logic. For more info refer to [`sp_genesis_builder::GenesisBuilder`]. + +use frame_support::traits::BuildGenesisConfig; +use sp_genesis_builder::Result as BuildResult; +use sp_runtime::format_runtime_string; + +/// Get the default `GenesisConfig` as a JSON blob. For more info refer to +/// [`sp_genesis_builder::GenesisBuilder::create_default_config`] +pub fn create_default_config() -> sp_std::vec::Vec { + serde_json::to_string(&GC::default()) + .expect("serialization to json is expected to work. qed.") + .into_bytes() +} + +/// Build `GenesisConfig` from a JSON blob not using any defaults and store it in the storage. For +/// more info refer to [`sp_genesis_builder::GenesisBuilder::build_config`]. +pub fn build_config(json: sp_std::vec::Vec) -> BuildResult { + let gc = serde_json::from_slice::(&json) + .map_err(|e| format_runtime_string!("Invalid JSON blob: {}", e))?; + ::build(&gc); + Ok(()) +} diff --git a/frame/support/src/inherent.rs b/frame/support/src/inherent.rs index dce61378de8b8..8889c93809c7a 100644 --- a/frame/support/src/inherent.rs +++ b/frame/support/src/inherent.rs @@ -15,11 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#[doc(hidden)] -pub use crate::sp_runtime::traits::{Block as BlockT, Extrinsic}; -#[doc(hidden)] -pub use crate::sp_std::vec::Vec; - pub use sp_inherents::{ CheckInherentsResult, InherentData, InherentIdentifier, IsFatalError, MakeFatalError, }; diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 3cd8378be45d1..0c416c73766c8 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -41,14 +41,10 @@ pub use codec; pub use frame_metadata as metadata; #[doc(hidden)] pub use log; -#[cfg(feature = "std")] -#[doc(hidden)] -pub use once_cell; #[doc(hidden)] pub use paste; #[doc(hidden)] pub use scale_info; -#[cfg(feature = "std")] pub use serde; pub use sp_api::metadata_ir; pub use sp_core::{OpaqueMetadata, Void}; @@ -71,17 +67,15 @@ pub use tt_call::*; #[macro_use] pub mod dispatch; -mod hash; -pub mod storage; -#[macro_use] -pub mod event; -pub mod inherent; -#[macro_use] -pub mod error; pub mod crypto; pub mod dispatch_context; +mod hash; +pub mod inherent; pub mod instances; pub mod migrations; +pub mod storage; +#[cfg(test)] +mod tests; pub mod traits; pub mod weights; #[doc(hidden)] @@ -135,82 +129,54 @@ impl TypeId for PalletId { const TYPE_ID: [u8; 4] = *b"modl"; } -/// Generate a new type alias for [`storage::types::StorageValue`], -/// [`storage::types::StorageMap`], [`storage::types::StorageDoubleMap`] -/// and [`storage::types::StorageNMap`]. +/// Generate a [`#[pallet::storage]`](pallet_macros::storage) alias outside of a pallet. /// -/// Useful for creating a *storage-like* struct for test and migrations. +/// This storage alias works similarly to the [`#[pallet::storage]`](pallet_macros::storage) +/// attribute macro. It supports [`StorageValue`](storage::types::StorageValue), +/// [`StorageMap`](storage::types::StorageMap), +/// [`StorageDoubleMap`](storage::types::StorageDoubleMap) and +/// [`StorageNMap`](storage::types::StorageNMap). 
The main difference to the normal +/// [`#[pallet::storage]`](pallet_macros::storage) is the flexibility around declaring the +/// storage prefix to use. The storage prefix determines where to find the value in the +/// storage. [`#[pallet::storage]`](pallet_macros::storage) uses the name of the pallet as +/// declared in [`construct_runtime!`]. /// -/// ``` -/// # use frame_support::storage_alias; -/// use frame_support::codec; -/// use frame_support::Twox64Concat; -/// // generate a storage value with type u32. -/// #[storage_alias] -/// type StorageName = StorageValue; -/// -/// // generate a double map from `(u32, u32)` (with hashers `Twox64Concat` for each key) -/// // to `Vec` -/// #[storage_alias] -/// type OtherStorageName = StorageDoubleMap< -/// OtherPrefix, -/// Twox64Concat, -/// u32, -/// Twox64Concat, -/// u32, -/// Vec, -/// >; -/// -/// // optionally specify the query type -/// use frame_support::pallet_prelude::{ValueQuery, OptionQuery}; -/// #[storage_alias] -/// type ValueName = StorageValue; -/// #[storage_alias] -/// type SomeStorageName = StorageMap< -/// Prefix, -/// Twox64Concat, -/// u32, -/// Vec, -/// ValueQuery, -/// >; -/// -/// // generate a map from `Config::AccountId` (with hasher `Twox64Concat`) to `Vec` -/// trait Config { type AccountId: codec::FullCodec; } -/// #[storage_alias] -/// type GenericStorage = StorageMap::AccountId, Vec>; -/// -/// // It also supports NMap -/// use frame_support::storage::types::Key as NMapKey; -/// -/// #[storage_alias] -/// type SomeNMap = StorageNMap, NMapKey), Vec>; -/// -/// // Using pallet name as prefix. -/// // -/// // When the first generic argument is taking generic arguments it is expected to be a pallet. -/// // The prefix will then be the pallet name as configured in the runtime through -/// // `construct_runtime!`. 
-///
-/// # struct Pallet(std::marker::PhantomData<(T, I)>);
-/// # impl frame_support::traits::PalletInfoAccess for Pallet {
-/// # fn index() -> usize { 0 }
-/// # fn name() -> &'static str { "pallet" }
-/// # fn module_name() -> &'static str { "module" }
-/// # fn crate_version() -> frame_support::traits::CrateVersion { unimplemented!() }
-/// # }
-///
-/// #[storage_alias]
-/// type SomeValue = StorageValue, u64>;
-///
-/// // Pallet with instance
-///
-/// #[storage_alias]
-/// type SomeValue2 = StorageValue, u64>;
-///
-/// # fn main() {}
-/// ```
+/// The flexibility around declaring the storage prefix makes this macro very useful for
+/// writing migrations etc.
+///
+/// # Examples
+///
+/// There are different ways to declare the `prefix` to use. The `prefix` type can either be
+/// declared explicitly by passing it to the macro as an attribute or by letting the macro
+/// guess on what the `prefix` type is. The `prefix` is always passed as the first generic
+/// argument to the type declaration. When using [`#[pallet::storage]`](pallet_macros::storage)
+/// this first generic argument is always `_`. Besides declaring the `prefix`, the rest of the
+/// type declaration works as with [`#[pallet::storage]`](pallet_macros::storage).
+///
+/// 1. Use the `verbatim` prefix type. This prefix type uses the given identifier as the
+/// `prefix`:
+#[doc = docify::embed!("src/tests/storage_alias.rs", verbatim_attribute)]
+///
+/// 2. Use the `pallet_name` prefix type. This prefix type uses the name of the pallet as
+/// configured in [`construct_runtime!`] as the `prefix`:
+#[doc = docify::embed!("src/tests/storage_alias.rs", pallet_name_attribute)]
+/// It requires that the given prefix type implements
+/// [`PalletInfoAccess`](traits::PalletInfoAccess) (which is always the case for FRAME pallet
+/// structs). In the example above, `Pallet` is the prefix type.
+///
+/// 3. Use the `dynamic` prefix type. 
This prefix type calls [`Get::get()`](traits::Get::get) +/// to get the `prefix`: +#[doc = docify::embed!("src/tests/storage_alias.rs", dynamic_attribute)] +/// It requires that the given prefix type implements [`Get<'static str>`](traits::Get). +/// +/// 4. Let the macro "guess" what kind of prefix type to use. This only supports verbatim or +/// pallet name. The macro uses the presence of generic arguments to the prefix type as an +/// indication that it should use the pallet name as the `prefix`: +#[doc = docify::embed!("src/tests/storage_alias.rs", storage_alias_guess)] pub use frame_support_procedural::storage_alias; +pub use frame_support_procedural::derive_impl; + /// Create new implementations of the [`Get`](crate::traits::Get) trait. /// /// The so-called parameter type can be created in four different ways: @@ -533,8 +499,7 @@ pub fn debug(data: &impl sp_std::fmt::Debug) { #[doc(inline)] pub use frame_support_procedural::{ - construct_runtime, decl_storage, match_and_insert, transactional, PalletError, - RuntimeDebugNoBound, + construct_runtime, match_and_insert, transactional, PalletError, RuntimeDebugNoBound, }; #[doc(hidden)] @@ -818,712 +783,32 @@ macro_rules! 
assert_error_encoded_size { } => {}; } -#[cfg(feature = "std")] #[doc(hidden)] pub use serde::{Deserialize, Serialize}; -#[cfg(test)] -pub mod tests { - use super::*; - use crate::metadata_ir::{ - PalletStorageMetadataIR, StorageEntryMetadataIR, StorageEntryModifierIR, - StorageEntryTypeIR, StorageHasherIR, - }; - use sp_io::{MultiRemovalResults, TestExternalities}; - use sp_runtime::{generic, traits::BlakeTwo256, BuildStorage}; - use sp_std::result; - - pub use self::frame_system::{Config, Pallet}; - - #[pallet] - pub mod frame_system { - #[allow(unused)] - use super::{frame_system, frame_system::pallet_prelude::*}; - pub use crate::dispatch::RawOrigin; - use crate::pallet_prelude::*; - - #[pallet::pallet] - pub struct Pallet(PhantomData); - - #[pallet::config] - #[pallet::disable_frame_system_supertrait_check] - pub trait Config: 'static { - type BlockNumber: Parameter + Default + MaxEncodedLen; - type AccountId; - type BaseCallFilter: crate::traits::Contains; - type RuntimeOrigin; - type RuntimeCall; - type PalletInfo: crate::traits::PalletInfo; - type DbWeight: Get; - } - - #[pallet::error] - pub enum Error { - /// Required by construct_runtime - CallFiltered, - } - - #[pallet::origin] - pub type Origin = RawOrigin<::AccountId>; - - #[pallet::call] - impl Pallet {} - - #[pallet::storage] - pub type Data = StorageMap<_, Twox64Concat, u32, u64, ValueQuery>; - - #[pallet::storage] - pub type OptionLinkedMap = StorageMap<_, Blake2_128Concat, u32, u32, OptionQuery>; - - #[pallet::storage] - #[pallet::getter(fn generic_data)] - pub type GenericData = - StorageMap<_, Identity, T::BlockNumber, T::BlockNumber, ValueQuery>; - - #[pallet::storage] - #[pallet::getter(fn generic_data2)] - pub type GenericData2 = - StorageMap<_, Blake2_128Concat, T::BlockNumber, T::BlockNumber, OptionQuery>; - - #[pallet::storage] - pub type DataDM = - StorageDoubleMap<_, Twox64Concat, u32, Blake2_128Concat, u32, u64, ValueQuery>; - - #[pallet::storage] - pub type GenericDataDM = 
StorageDoubleMap< - _, - Blake2_128Concat, - T::BlockNumber, - Identity, - T::BlockNumber, - T::BlockNumber, - ValueQuery, - >; - - #[pallet::storage] - pub type GenericData2DM = StorageDoubleMap< - _, - Blake2_128Concat, - T::BlockNumber, - Twox64Concat, - T::BlockNumber, - T::BlockNumber, - OptionQuery, - >; - - #[pallet::storage] - #[pallet::unbounded] - pub type AppendableDM = StorageDoubleMap< - _, - Blake2_128Concat, - u32, - Blake2_128Concat, - T::BlockNumber, - Vec, - ValueQuery, - >; - - #[pallet::genesis_config] - pub struct GenesisConfig { - pub data: Vec<(u32, u64)>, - pub test_config: Vec<(u32, u32, u64)>, - } - - impl Default for GenesisConfig { - fn default() -> Self { - Self { data: vec![(15u32, 42u64)], test_config: vec![(15u32, 16u32, 42u64)] } - } - } - - #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { - fn build(&self) { - for (k, v) in &self.data { - >::insert(k, v); - } - for (k1, k2, v) in &self.test_config { - >::insert(k1, k2, v); - } - } - } - - pub mod pallet_prelude { - pub type OriginFor = ::RuntimeOrigin; - } - } - - type BlockNumber = u32; - type AccountId = u32; - type Header = generic::Header; - type UncheckedExtrinsic = generic::UncheckedExtrinsic; - type Block = generic::Block; - - crate::construct_runtime!( - pub enum Runtime - where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: self::frame_system, - } - ); - - impl Config for Runtime { - type BlockNumber = BlockNumber; - type AccountId = AccountId; - type BaseCallFilter = crate::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type PalletInfo = PalletInfo; - type DbWeight = (); - } - - fn new_test_ext() -> TestExternalities { - GenesisConfig::default().build_storage().unwrap().into() - } - - trait Sorted { - fn sorted(self) -> Self; - } - - impl Sorted for Vec { - fn sorted(mut self) -> Self { - self.sort(); - self - } - } - - #[test] - fn storage_alias_works() { - 
new_test_ext().execute_with(|| { - #[crate::storage_alias] - type GenericData2 = StorageMap< - System, - Blake2_128Concat, - ::BlockNumber, - ::BlockNumber, - >; - - assert_eq!(Pallet::::generic_data2(5), None); - GenericData2::::insert(5, 5); - assert_eq!(Pallet::::generic_data2(5), Some(5)); - - /// Some random docs that ensure that docs are accepted - #[crate::storage_alias] - pub type GenericData = StorageMap< - Test2, - Blake2_128Concat, - ::BlockNumber, - ::BlockNumber, - >; - }); - } - - #[test] - fn storage_value_mutate_exists_should_work() { - new_test_ext().execute_with(|| { - #[crate::storage_alias] - pub type Value = StorageValue; - - assert!(!Value::exists()); - - Value::mutate_exists(|v| *v = Some(1)); - assert!(Value::exists()); - assert_eq!(Value::get(), Some(1)); - - // removed if mutated to `None` - Value::mutate_exists(|v| *v = None); - assert!(!Value::exists()); - }); - } - - #[test] - fn storage_value_try_mutate_exists_should_work() { - new_test_ext().execute_with(|| { - #[crate::storage_alias] - pub type Value = StorageValue; - - type TestResult = result::Result<(), &'static str>; - - assert!(!Value::exists()); - - // mutated if `Ok` - assert_ok!(Value::try_mutate_exists(|v| -> TestResult { - *v = Some(1); - Ok(()) - })); - assert!(Value::exists()); - assert_eq!(Value::get(), Some(1)); - - // no-op if `Err` - assert_noop!( - Value::try_mutate_exists(|v| -> TestResult { - *v = Some(2); - Err("nah") - }), - "nah" - ); - assert_eq!(Value::get(), Some(1)); - - // removed if mutated to`None` - assert_ok!(Value::try_mutate_exists(|v| -> TestResult { - *v = None; - Ok(()) - })); - assert!(!Value::exists()); - }); - } - - #[test] - fn map_issue_3318() { - new_test_ext().execute_with(|| { - type OptionLinkedMap = self::frame_system::OptionLinkedMap; - - OptionLinkedMap::insert(1, 1); - assert_eq!(OptionLinkedMap::get(1), Some(1)); - OptionLinkedMap::insert(1, 2); - assert_eq!(OptionLinkedMap::get(1), Some(2)); - }); - } - - #[test] - fn 
map_swap_works() { - new_test_ext().execute_with(|| { - type OptionLinkedMap = self::frame_system::OptionLinkedMap; - - OptionLinkedMap::insert(0, 0); - OptionLinkedMap::insert(1, 1); - OptionLinkedMap::insert(2, 2); - OptionLinkedMap::insert(3, 3); - - let collect = || OptionLinkedMap::iter().collect::>().sorted(); - assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); - - // Two existing - OptionLinkedMap::swap(1, 2); - assert_eq!(collect(), vec![(0, 0), (1, 2), (2, 1), (3, 3)]); - - // Back to normal - OptionLinkedMap::swap(2, 1); - assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); - - // Left existing - OptionLinkedMap::swap(2, 5); - assert_eq!(collect(), vec![(0, 0), (1, 1), (3, 3), (5, 2)]); - - // Right existing - OptionLinkedMap::swap(5, 2); - assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); - }); - } - - #[test] - fn double_map_swap_works() { - new_test_ext().execute_with(|| { - type DataDM = self::frame_system::DataDM; - - DataDM::insert(0, 1, 1); - DataDM::insert(1, 0, 2); - DataDM::insert(1, 1, 3); - - let get_all = || { - vec![ - DataDM::get(0, 1), - DataDM::get(1, 0), - DataDM::get(1, 1), - DataDM::get(2, 0), - DataDM::get(2, 1), - ] - }; - assert_eq!(get_all(), vec![1, 2, 3, 0, 0]); - - // Two existing - DataDM::swap(0, 1, 1, 0); - assert_eq!(get_all(), vec![2, 1, 3, 0, 0]); - - // Left existing - DataDM::swap(1, 0, 2, 0); - assert_eq!(get_all(), vec![2, 0, 3, 1, 0]); - - // Right existing - DataDM::swap(2, 1, 1, 1); - assert_eq!(get_all(), vec![2, 0, 0, 1, 3]); - }); - } - - #[test] - fn map_basic_insert_remove_should_work() { - new_test_ext().execute_with(|| { - type Map = self::frame_system::Data; - - // initialized during genesis - assert_eq!(Map::get(&15u32), 42u64); - - // get / insert / take - let key = 17u32; - assert_eq!(Map::get(&key), 0u64); - Map::insert(key, 4u64); - assert_eq!(Map::get(&key), 4u64); - assert_eq!(Map::take(&key), 4u64); - assert_eq!(Map::get(&key), 0u64); - - // mutate - Map::mutate(&key, 
|val| { - *val = 15; - }); - assert_eq!(Map::get(&key), 15u64); - - // remove - Map::remove(&key); - assert_eq!(Map::get(&key), 0u64); - }); - } - - #[test] - fn map_iteration_should_work() { - new_test_ext().execute_with(|| { - type Map = self::frame_system::Data; - - assert_eq!(Map::iter().collect::>().sorted(), vec![(15, 42)]); - // insert / remove - let key = 17u32; - Map::insert(key, 4u64); - assert_eq!(Map::iter().collect::>().sorted(), vec![(15, 42), (key, 4)]); - assert_eq!(Map::take(&15), 42u64); - assert_eq!(Map::take(&key), 4u64); - assert_eq!(Map::iter().collect::>().sorted(), vec![]); - - // Add couple of more elements - Map::insert(key, 42u64); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key, 42)]); - Map::insert(key + 1, 43u64); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key, 42), (key + 1, 43)]); - - // mutate - let key = key + 2; - Map::mutate(&key, |val| { - *val = 15; - }); - assert_eq!( - Map::iter().collect::>().sorted(), - vec![(key - 2, 42), (key - 1, 43), (key, 15)] - ); - Map::mutate(&key, |val| { - *val = 17; - }); - assert_eq!( - Map::iter().collect::>().sorted(), - vec![(key - 2, 42), (key - 1, 43), (key, 17)] - ); - - // remove first - Map::remove(&key); - assert_eq!( - Map::iter().collect::>().sorted(), - vec![(key - 2, 42), (key - 1, 43)] - ); - - // remove last from the list - Map::remove(&(key - 2)); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 1, 43)]); - - // remove the last element - Map::remove(&(key - 1)); - assert_eq!(Map::iter().collect::>().sorted(), vec![]); - }); - } - - #[test] - fn double_map_basic_insert_remove_remove_prefix_with_commit_should_work() { - let key1 = 17u32; - let key2 = 18u32; - type DoubleMap = self::frame_system::DataDM; - let mut e = new_test_ext(); - e.execute_with(|| { - // initialized during genesis - assert_eq!(DoubleMap::get(&15u32, &16u32), 42u64); - - // get / insert / take - assert_eq!(DoubleMap::get(&key1, &key2), 0u64); - DoubleMap::insert(&key1, &key2, 
&4u64); - assert_eq!(DoubleMap::get(&key1, &key2), 4u64); - assert_eq!(DoubleMap::take(&key1, &key2), 4u64); - assert_eq!(DoubleMap::get(&key1, &key2), 0u64); - - // mutate - DoubleMap::mutate(&key1, &key2, |val| *val = 15); - assert_eq!(DoubleMap::get(&key1, &key2), 15u64); - - // remove - DoubleMap::remove(&key1, &key2); - assert_eq!(DoubleMap::get(&key1, &key2), 0u64); - - // remove prefix - DoubleMap::insert(&key1, &key2, &4u64); - DoubleMap::insert(&key1, &(key2 + 1), &4u64); - DoubleMap::insert(&(key1 + 1), &key2, &4u64); - DoubleMap::insert(&(key1 + 1), &(key2 + 1), &4u64); - }); - e.commit_all().unwrap(); - e.execute_with(|| { - assert!(matches!( - DoubleMap::clear_prefix(&key1, u32::max_value(), None), - MultiRemovalResults { maybe_cursor: None, backend: 2, unique: 2, loops: 2 } - )); - assert_eq!(DoubleMap::get(&key1, &key2), 0u64); - assert_eq!(DoubleMap::get(&key1, &(key2 + 1)), 0u64); - assert_eq!(DoubleMap::get(&(key1 + 1), &key2), 4u64); - assert_eq!(DoubleMap::get(&(key1 + 1), &(key2 + 1)), 4u64); - }); - } - - #[test] - fn double_map_basic_insert_remove_remove_prefix_should_work() { - new_test_ext().execute_with(|| { - let key1 = 17u32; - let key2 = 18u32; - type DoubleMap = self::frame_system::DataDM; - - // initialized during genesis - assert_eq!(DoubleMap::get(&15u32, &16u32), 42u64); - - // get / insert / take - assert_eq!(DoubleMap::get(&key1, &key2), 0u64); - DoubleMap::insert(&key1, &key2, &4u64); - assert_eq!(DoubleMap::get(&key1, &key2), 4u64); - assert_eq!(DoubleMap::take(&key1, &key2), 4u64); - assert_eq!(DoubleMap::get(&key1, &key2), 0u64); - - // mutate - DoubleMap::mutate(&key1, &key2, |val| *val = 15); - assert_eq!(DoubleMap::get(&key1, &key2), 15u64); - - // remove - DoubleMap::remove(&key1, &key2); - assert_eq!(DoubleMap::get(&key1, &key2), 0u64); - - // remove prefix - DoubleMap::insert(&key1, &key2, &4u64); - DoubleMap::insert(&key1, &(key2 + 1), &4u64); - DoubleMap::insert(&(key1 + 1), &key2, &4u64); - DoubleMap::insert(&(key1 + 
1), &(key2 + 1), &4u64); - // all in overlay - assert!(matches!( - DoubleMap::clear_prefix(&key1, u32::max_value(), None), - MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } - )); - // Note this is the incorrect answer (for now), since we are using v2 of - // `clear_prefix`. - // When we switch to v3, then this will become: - // MultiRemovalResults:: { maybe_cursor: None, backend: 0, unique: 2, loops: 2 }, - assert!(matches!( - DoubleMap::clear_prefix(&key1, u32::max_value(), None), - MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } - )); - assert_eq!(DoubleMap::get(&key1, &key2), 0u64); - assert_eq!(DoubleMap::get(&key1, &(key2 + 1)), 0u64); - assert_eq!(DoubleMap::get(&(key1 + 1), &key2), 4u64); - assert_eq!(DoubleMap::get(&(key1 + 1), &(key2 + 1)), 4u64); - }); - } - - #[test] - fn double_map_append_should_work() { - new_test_ext().execute_with(|| { - type DoubleMap = self::frame_system::AppendableDM; - - let key1 = 17u32; - let key2 = 18u32; - - DoubleMap::insert(&key1, &key2, &vec![1]); - DoubleMap::append(&key1, &key2, 2); - assert_eq!(DoubleMap::get(&key1, &key2), &[1, 2]); - }); - } - - #[test] - fn double_map_mutate_exists_should_work() { - new_test_ext().execute_with(|| { - type DoubleMap = self::frame_system::DataDM; - - let (key1, key2) = (11, 13); - - // mutated - DoubleMap::mutate_exists(key1, key2, |v| *v = Some(1)); - assert_eq!(DoubleMap::get(&key1, key2), 1); - - // removed if mutated to `None` - DoubleMap::mutate_exists(key1, key2, |v| *v = None); - assert!(!DoubleMap::contains_key(&key1, key2)); - }); - } - - #[test] - fn double_map_try_mutate_exists_should_work() { - new_test_ext().execute_with(|| { - type DoubleMap = self::frame_system::DataDM; - type TestResult = Result<(), &'static str>; - - let (key1, key2) = (11, 13); - - // mutated if `Ok` - assert_ok!(DoubleMap::try_mutate_exists(key1, key2, |v| -> TestResult { - *v = Some(1); - Ok(()) - })); - assert_eq!(DoubleMap::get(&key1, key2), 
1); - - // no-op if `Err` - assert_noop!( - DoubleMap::try_mutate_exists(key1, key2, |v| -> TestResult { - *v = Some(2); - Err("nah") - }), - "nah" - ); - - // removed if mutated to`None` - assert_ok!(DoubleMap::try_mutate_exists(key1, key2, |v| -> TestResult { - *v = None; - Ok(()) - })); - assert!(!DoubleMap::contains_key(&key1, key2)); - }); - } - - fn expected_metadata() -> PalletStorageMetadataIR { - PalletStorageMetadataIR { - prefix: "System", - entries: vec![ - StorageEntryMetadataIR { - name: "Data", - modifier: StorageEntryModifierIR::Default, - ty: StorageEntryTypeIR::Map { - hashers: vec![StorageHasherIR::Twox64Concat], - key: scale_info::meta_type::(), - value: scale_info::meta_type::(), - }, - default: vec![0, 0, 0, 0, 0, 0, 0, 0], - docs: vec![], - }, - StorageEntryMetadataIR { - name: "OptionLinkedMap", - modifier: StorageEntryModifierIR::Optional, - ty: StorageEntryTypeIR::Map { - hashers: vec![StorageHasherIR::Blake2_128Concat], - key: scale_info::meta_type::(), - value: scale_info::meta_type::(), - }, - default: vec![0], - docs: vec![], - }, - StorageEntryMetadataIR { - name: "GenericData", - modifier: StorageEntryModifierIR::Default, - ty: StorageEntryTypeIR::Map { - hashers: vec![StorageHasherIR::Identity], - key: scale_info::meta_type::(), - value: scale_info::meta_type::(), - }, - default: vec![0, 0, 0, 0], - docs: vec![], - }, - StorageEntryMetadataIR { - name: "GenericData2", - modifier: StorageEntryModifierIR::Optional, - ty: StorageEntryTypeIR::Map { - hashers: vec![StorageHasherIR::Blake2_128Concat], - key: scale_info::meta_type::(), - value: scale_info::meta_type::(), - }, - default: vec![0], - docs: vec![], - }, - StorageEntryMetadataIR { - name: "DataDM", - modifier: StorageEntryModifierIR::Default, - ty: StorageEntryTypeIR::Map { - hashers: vec![ - StorageHasherIR::Twox64Concat, - StorageHasherIR::Blake2_128Concat, - ], - key: scale_info::meta_type::<(u32, u32)>(), - value: scale_info::meta_type::(), - }, - default: vec![0, 0, 0, 0, 
0, 0, 0, 0], - docs: vec![], - }, - StorageEntryMetadataIR { - name: "GenericDataDM", - modifier: StorageEntryModifierIR::Default, - ty: StorageEntryTypeIR::Map { - hashers: vec![StorageHasherIR::Blake2_128Concat, StorageHasherIR::Identity], - key: scale_info::meta_type::<(u32, u32)>(), - value: scale_info::meta_type::(), - }, - default: vec![0, 0, 0, 0], - docs: vec![], - }, - StorageEntryMetadataIR { - name: "GenericData2DM", - modifier: StorageEntryModifierIR::Optional, - ty: StorageEntryTypeIR::Map { - hashers: vec![ - StorageHasherIR::Blake2_128Concat, - StorageHasherIR::Twox64Concat, - ], - key: scale_info::meta_type::<(u32, u32)>(), - value: scale_info::meta_type::(), - }, - default: vec![0], - docs: vec![], - }, - StorageEntryMetadataIR { - name: "AppendableDM", - modifier: StorageEntryModifierIR::Default, - ty: StorageEntryTypeIR::Map { - hashers: vec![ - StorageHasherIR::Blake2_128Concat, - StorageHasherIR::Blake2_128Concat, - ], - key: scale_info::meta_type::<(u32, u32)>(), - value: scale_info::meta_type::>(), - }, - default: vec![0], - docs: vec![], - }, - ], - } - } - - #[test] - fn store_metadata() { - let metadata = Pallet::::storage_metadata(); - pretty_assertions::assert_eq!(expected_metadata(), metadata); - } - - parameter_types! { - storage StorageParameter: u64 = 10; - } - - #[test] - fn check_storage_parameter_type_works() { - TestExternalities::default().execute_with(|| { - assert_eq!(sp_io::hashing::twox_128(b":StorageParameter:"), StorageParameter::key()); +#[doc(hidden)] +#[cfg(not(no_std))] +pub use macro_magic; - assert_eq!(10, StorageParameter::get()); +/// Private module re-exporting items used by frame support macros. +#[doc(hidden)] +pub mod _private { + pub use sp_inherents; +} - StorageParameter::set(&300); - assert_eq!(300, StorageParameter::get()); - }) - } +/// Prelude to be used for pallet testing, for ease of use. 
+#[cfg(feature = "std")] +pub mod testing_prelude { + pub use super::{ + assert_err, assert_err_ignore_postinfo, assert_err_with_weight, assert_error_encoded_size, + assert_noop, assert_ok, assert_storage_noop, bounded_btree_map, bounded_vec, + parameter_types, traits::Get, + }; + pub use sp_arithmetic::assert_eq_error_rate; } /// Prelude to be used alongside pallet macro, for ease of use. pub mod pallet_prelude { - #[cfg(feature = "std")] - pub use crate::traits::GenesisBuild; pub use crate::{ dispatch::{ DispatchClass, DispatchError, DispatchResult, DispatchResultWithPostInfo, Parameter, @@ -1535,20 +820,23 @@ pub mod pallet_prelude { storage::{ bounded_vec::BoundedVec, types::{ - CountedStorageMap, Key as NMapKey, OptionQuery, ResultQuery, StorageDoubleMap, - StorageMap, StorageNMap, StorageValue, ValueQuery, + CountedStorageMap, CountedStorageNMap, Key as NMapKey, OptionQuery, ResultQuery, + StorageDoubleMap, StorageMap, StorageNMap, StorageValue, ValueQuery, }, + StorageList, }, traits::{ - ConstU32, EnsureOrigin, Get, GetDefault, GetStorageVersion, Hooks, IsType, - PalletInfoAccess, StorageInfoTrait, StorageVersion, TypedGet, + BuildGenesisConfig, ConstU32, EnsureOrigin, Get, GetDefault, GetStorageVersion, Hooks, + IsType, PalletInfoAccess, StorageInfoTrait, StorageVersion, TypedGet, }, Blake2_128, Blake2_128Concat, Blake2_256, CloneNoBound, DebugNoBound, EqNoBound, Identity, PartialEqNoBound, RuntimeDebug, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, }; pub use codec::{Decode, Encode, MaxEncodedLen}; pub use frame_support::pallet_macros::*; + pub use frame_support_procedural::register_default_impl; pub use scale_info::TypeInfo; + pub use sp_inherents::MakeFatalError; pub use sp_runtime::{ traits::{MaybeSerializeDeserialize, Member, ValidateUnsigned}, transaction_validity::{ @@ -1647,9 +935,14 @@ pub mod pallet_prelude { /// default, dev mode pallets will assume a weight of zero (`0`) if a weight is not /// specified. 
This is equivalent to specifying `#[weight(0)]` on all calls that do not /// specify a weight. +/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By +/// default, dev mode pallets will assume a call index based on the order of the call. /// * All storages are marked as unbounded, meaning you do not need to implement /// `MaxEncodedLen` on storage types. This is equivalent to specifying `#[pallet::unbounded]` /// on all storage type definitions. +/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode, +/// these will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher` +/// can simply be ignored when in `dev_mode`. /// /// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or /// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This argument @@ -2213,7 +1506,7 @@ pub mod pallet_prelude { /// for the pallet. /// /// Item is defined as either an enum or a struct. It needs to be public and implement the -/// trait [`GenesisBuild`](`traits::GenesisBuild`) with +/// trait [`BuildGenesisConfig`](`traits::BuildGenesisConfig`) with /// [`#[pallet::genesis_build]`](#genesis-build-palletgenesis_build-optional). The type /// generics are constrained to be either none, or `T` or `T: Config`. /// @@ -2495,14 +1788,15 @@ pub mod pallet_prelude { /// // /// // Type must implement the `Default` trait. /// #[pallet::genesis_config] -/// #[derive(Default)] -/// pub struct GenesisConfig { +/// #[derive(frame_support::DefaultNoBound)] +/// pub struct GenesisConfig { +/// _config: sp_std::marker::PhantomData, /// _myfield: u32, /// } /// /// // Declare genesis builder. 
(This is need only if GenesisConfig is declared) /// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig { +/// impl BuildGenesisConfig for GenesisConfig { /// fn build(&self) {} /// } /// @@ -2632,13 +1926,14 @@ pub mod pallet_prelude { /// StorageMap; /// /// #[pallet::genesis_config] -/// #[derive(Default)] -/// pub struct GenesisConfig { +/// #[derive(frame_support::DefaultNoBound)] +/// pub struct GenesisConfig, I: 'static = ()> { +/// _config: sp_std::marker::PhantomData<(T,I)>, /// _myfield: u32, /// } /// /// #[pallet::genesis_build] -/// impl, I: 'static> GenesisBuild for GenesisConfig { +/// impl, I: 'static> BuildGenesisConfig for GenesisConfig { /// fn build(&self) {} /// } /// @@ -2696,13 +1991,13 @@ pub mod pallet_prelude { /// - query the metadata using the `state_getMetadata` RPC and curl, or use `subsee -p /// > meta.json` /// 2. Generate the template upgrade for the pallet provided by `decl_storage` with the -/// environment variable `PRINT_PALLET_UPGRADE`: `PRINT_PALLET_UPGRADE=1 cargo check -p -/// my_pallet`. This template can be used as it contains all information for storages, -/// genesis config and genesis build. +/// environment variable `PRINT_PALLET_UPGRADE`: `PRINT_PALLET_UPGRADE=1 cargo check -p +/// my_pallet`. This template can be used as it contains all information for storages, +/// genesis config and genesis build. /// 3. Reorganize the pallet to have the trait `Config`, `decl_*` macros, -/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`), -/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`), and Origin` all together in one -/// file. Suggested order: +/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`), +/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`), and Origin` all together in one +/// file. Suggested order: /// * `Config`, /// * `decl_module`, /// * `decl_event`, @@ -2762,8 +2057,8 @@ pub mod pallet_prelude { /// 8. 
**migrate error**: rewrite it with attribute /// [`#[pallet::error]`](#error-palleterror-optional). /// 9. **migrate storage**: `decl_storage` provide an upgrade template (see 3.). All storages, -/// genesis config, genesis build and default implementation of genesis config can be -/// taken from it directly. +/// genesis config, genesis build and default implementation of genesis config can be taken +/// from it directly. /// /// Otherwise here is the manual process: /// @@ -2884,11 +2179,23 @@ pub mod pallet_macros { pub use frame_support_procedural::{ call_index, compact, composite_enum, config, constant, disable_frame_system_supertrait_check, error, event, extra_constants, generate_deposit, - generate_store, genesis_build, genesis_config, getter, hooks, inherent, origin, storage, - storage_prefix, storage_version, type_value, unbounded, validate_unsigned, weight, - whitelist_storage, + generate_store, genesis_build, genesis_config, getter, hooks, import_section, inherent, + no_default, origin, pallet_section, storage, storage_prefix, storage_version, type_value, + unbounded, validate_unsigned, weight, whitelist_storage, }; } +#[deprecated(note = "Will be removed after July 2023; Use `sp_runtime::traits` directly instead.")] +pub mod error { + #[doc(hidden)] + pub use sp_runtime::traits::{BadOrigin, LookupError}; +} + +#[doc(inline)] +pub use frame_support_procedural::register_default_impl; + // Generate a macro that will enable/disable code based on `std` feature being active. sp_core::generate_feature_enabled_macro!(std_enabled, feature = "std", $); + +// Helper for implementing GenesisBuilder runtime API +pub mod genesis_builder_helper; diff --git a/frame/support/src/migrations.rs b/frame/support/src/migrations.rs index 381f1feda9430..19eec194a76ad 100644 --- a/frame/support/src/migrations.rs +++ b/frame/support/src/migrations.rs @@ -15,8 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#[cfg(feature = "try-runtime")] -use crate::storage::unhashed::contains_prefixed_key; use crate::{ traits::{GetStorageVersion, NoStorageVersionSet, PalletInfoAccess, StorageVersion}, weights::{RuntimeDbWeight, Weight}, @@ -25,8 +23,149 @@ use impl_trait_for_tuples::impl_for_tuples; use sp_core::Get; use sp_io::{hashing::twox_128, storage::clear_prefix, KillStorageResult}; use sp_std::marker::PhantomData; -#[cfg(feature = "try-runtime")] -use sp_std::vec::Vec; + +/// EXPERIMENTAL: The API of this feature may change. +/// +/// Make it easier to write versioned runtime upgrades. +/// +/// [`VersionedRuntimeUpgrade`] allows developers to write migrations without worrying about +/// checking and setting storage versions. Instead, the developer wraps their migration in this +/// struct which takes care of version handling using best practices. +/// +/// It takes 5 type parameters: +/// - `From`: The version being upgraded from. +/// - `To`: The version being upgraded to. +/// - `Inner`: An implementation of `OnRuntimeUpgrade`. +/// - `Pallet`: The Pallet being upgraded. +/// - `Weight`: The runtime's RuntimeDbWeight implementation. +/// +/// When a [`VersionedRuntimeUpgrade`] `on_runtime_upgrade`, `pre_upgrade`, or `post_upgrade` +/// method is called, the on-chain version of the pallet is compared to `From`. If they match, the +/// `Inner` equivalent is called and the pallets on-chain version is set to `To` after the +/// migration. Otherwise, a warning is logged notifying the developer that the upgrade was a noop +/// and should probably be removed. +/// +/// ### Examples +/// ```ignore +/// // In file defining migrations +/// pub struct VersionUncheckedMigrateV5ToV6(sp_std::marker::PhantomData); +/// impl OnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6 { +/// // OnRuntimeUpgrade implementation... 
+/// } +/// +/// pub type VersionCheckedMigrateV5ToV6 = +/// VersionedRuntimeUpgrade< +/// 5, +/// 6, +/// VersionUncheckedMigrateV5ToV6, +/// crate::pallet::Pallet, +/// ::DbWeight +/// >; +/// +/// // Migrations tuple to pass to the Executive pallet: +/// pub type Migrations = ( +/// // other migrations... +/// VersionCheckedMigrateV5ToV6, +/// // other migrations... +/// ); +/// ``` +#[cfg(feature = "experimental")] +pub struct VersionedRuntimeUpgrade { + _marker: PhantomData<(Inner, Pallet, Weight)>, +} + +/// A helper enum to wrap the pre_upgrade bytes like an Option before passing them to post_upgrade. +/// This enum is used rather than an Option to make the API clearer to the developer. +#[cfg(feature = "experimental")] +#[derive(codec::Encode, codec::Decode)] +pub enum VersionedPostUpgradeData { + /// The migration ran, inner vec contains pre_upgrade data. + MigrationExecuted(sp_std::vec::Vec), + /// This migration is a noop, do not run post_upgrade checks. + Noop, +} + +/// Implementation of the `OnRuntimeUpgrade` trait for `VersionedRuntimeUpgrade`. +/// +/// Its main function is to perform the runtime upgrade in `on_runtime_upgrade` only if the on-chain +/// version of the pallets storage matches `From`, and after the upgrade set the on-chain storage to +/// `To`. If the versions do not match, it writes a log notifying the developer that the migration +/// is a noop. +#[cfg(feature = "experimental")] +impl< + const FROM: u16, + const TO: u16, + Inner: crate::traits::OnRuntimeUpgrade, + Pallet: GetStorageVersion + PalletInfoAccess, + DbWeight: Get, + > crate::traits::OnRuntimeUpgrade for VersionedRuntimeUpgrade +{ + /// Executes pre_upgrade if the migration will run, and wraps the pre_upgrade bytes in + /// [`VersionedPostUpgradeData`] before passing them to post_upgrade, so it knows whether the + /// migration ran or not. 
+ #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use codec::Encode; + let on_chain_version = Pallet::on_chain_storage_version(); + if on_chain_version == FROM { + Ok(VersionedPostUpgradeData::MigrationExecuted(Inner::pre_upgrade()?).encode()) + } else { + Ok(VersionedPostUpgradeData::Noop.encode()) + } + } + + /// Executes the versioned runtime upgrade. + /// + /// First checks if the pallets on-chain storage version matches the version of this upgrade. If + /// it matches, it calls `Inner::on_runtime_upgrade`, updates the on-chain version, and returns + /// the weight. If it does not match, it writes a log notifying the developer that the migration + /// is a noop. + fn on_runtime_upgrade() -> Weight { + let on_chain_version = Pallet::on_chain_storage_version(); + if on_chain_version == FROM { + log::info!( + "Running {} VersionedOnRuntimeUpgrade: version {:?} to {:?}.", + Pallet::name(), + FROM, + TO + ); + + // Execute the migration + let weight = Inner::on_runtime_upgrade(); + + // Update the on-chain version + StorageVersion::new(TO).put::(); + + weight.saturating_add(DbWeight::get().reads_writes(1, 1)) + } else { + log::warn!( + "{} VersionedOnRuntimeUpgrade for version {:?} skipped because current on-chain version is {:?}.", + Pallet::name(), + FROM, + on_chain_version + ); + DbWeight::get().reads(1) + } + } + + /// Executes `Inner::post_upgrade` if the migration just ran. + /// + /// pre_upgrade passes [`VersionedPostUpgradeData::MigrationExecuted`] to post_upgrade if + /// the migration ran, and [`VersionedPostUpgradeData::Noop`] otherwise. + #[cfg(feature = "try-runtime")] + fn post_upgrade( + versioned_post_upgrade_data_bytes: sp_std::vec::Vec, + ) -> Result<(), sp_runtime::TryRuntimeError> { + use codec::DecodeAll; + match ::decode_all(&mut &versioned_post_upgrade_data_bytes[..]) + .map_err(|_| "VersionedRuntimeUpgrade post_upgrade failed to decode PreUpgradeData")? 
+ { + VersionedPostUpgradeData::MigrationExecuted(inner_bytes) => + Inner::post_upgrade(inner_bytes), + VersionedPostUpgradeData::Noop => Ok(()), + } + } +} /// Can store the current pallet version in storage. pub trait StoreCurrentStorageVersion { @@ -118,12 +257,9 @@ pub fn migrate_from_pallet_version_to_storage_version< /// # Examples: /// ```ignore /// construct_runtime! { -/// pub enum Runtime where -/// Block = Block, -/// NodeBlock = primitives::Block, -/// UncheckedExtrinsic = UncheckedExtrinsic +/// pub enum Runtime /// { -/// System: frame_system::{Pallet, Call, Storage, Config, Event} = 0, +/// System: frame_system::{Pallet, Call, Storage, Config, Event} = 0, /// /// SomePalletToRemove: pallet_something::{Pallet, Call, Storage, Event} = 1, /// AnotherPalletToRemove: pallet_something_else::{Pallet, Call, Storage, Event} = 2, @@ -184,7 +320,9 @@ impl, DbWeight: Get> frame_support::traits } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use crate::storage::unhashed::contains_prefixed_key; + let hashed_prefix = twox_128(P::get().as_bytes()); match contains_prefixed_key(&hashed_prefix) { true => log::info!("Found {} keys pre-removal 👀", P::get()), @@ -193,16 +331,18 @@ impl, DbWeight: Get> frame_support::traits P::get() ), }; - Ok(Vec::new()) + Ok(sp_std::vec::Vec::new()) } #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + fn post_upgrade(_state: sp_std::vec::Vec) -> Result<(), sp_runtime::TryRuntimeError> { + use crate::storage::unhashed::contains_prefixed_key; + let hashed_prefix = twox_128(P::get().as_bytes()); match contains_prefixed_key(&hashed_prefix) { true => { log::error!("{} has keys remaining post-removal ❗", P::get()); - return Err("Keys remaining post-removal, this should never happen 🚨") + return Err("Keys remaining post-removal, this should never happen 🚨".into()) }, false => log::info!("No {} keys found 
post-removal 🎉", P::get()), }; diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index 5da68873b10e6..00a3f1bc7c1ce 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -21,7 +21,7 @@ use crate::{ Never, }; use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; -use sp_std::{borrow::Borrow, prelude::*}; +use sp_std::prelude::*; /// Generator for `StorageDoubleMap` used by `decl_storage`. /// @@ -78,7 +78,7 @@ pub trait StorageDoubleMap { KArg1: EncodeLike, { let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); - let key_hashed = k1.borrow().using_encoded(Self::Hasher1::hash); + let key_hashed = k1.using_encoded(Self::Hasher1::hash); let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.as_ref().len()); @@ -95,8 +95,8 @@ pub trait StorageDoubleMap { KArg2: EncodeLike, { let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); - let key1_hashed = k1.borrow().using_encoded(Self::Hasher1::hash); - let key2_hashed = k2.borrow().using_encoded(Self::Hasher2::hash); + let key1_hashed = k1.using_encoded(Self::Hasher1::hash); + let key2_hashed = k2.using_encoded(Self::Hasher2::hash); let mut final_key = Vec::with_capacity( storage_prefix.len() + key1_hashed.as_ref().len() + key2_hashed.as_ref().len(), @@ -198,7 +198,7 @@ where KArg2: EncodeLike, VArg: EncodeLike, { - unhashed::put(&Self::storage_double_map_final_key(k1, k2), &val.borrow()) + unhashed::put(&Self::storage_double_map_final_key(k1, k2), &val) } fn remove(k1: KArg1, k2: KArg2) @@ -336,8 +336,8 @@ where let old_key = { let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); - let key1_hashed = key1.borrow().using_encoded(OldHasher1::hash); - let key2_hashed = key2.borrow().using_encoded(OldHasher2::hash); + let key1_hashed = key1.using_encoded(OldHasher1::hash); + let 
key2_hashed = key2.using_encoded(OldHasher2::hash); let mut final_key = Vec::with_capacity( storage_prefix.len() + key1_hashed.as_ref().len() + key2_hashed.as_ref().len(), diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index 3b36b9bddb704..90fac4b41c759 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -68,7 +68,7 @@ pub trait StorageMap { KeyArg: EncodeLike, { let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); - let key_hashed = key.borrow().using_encoded(Self::Hasher::hash); + let key_hashed = key.using_encoded(Self::Hasher::hash); let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.as_ref().len()); @@ -178,34 +178,48 @@ where } fn translate Option>(mut f: F) { + let mut previous_key = None; + loop { + previous_key = Self::translate_next(previous_key, &mut f); + if previous_key.is_none() { + break + } + } + } + + fn translate_next Option>( + previous_key: Option>, + mut f: F, + ) -> Option> { let prefix = G::prefix_hash(); - let mut previous_key = prefix.clone(); - while let Some(next) = - sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) - { - previous_key = next; - let value = match unhashed::get::(&previous_key) { - Some(value) => value, - None => { - log::error!("Invalid translate: fail to decode old value"); - continue - }, - }; - - let mut key_material = G::Hasher::reverse(&previous_key[prefix.len()..]); - let key = match K::decode(&mut key_material) { - Ok(key) => key, - Err(_) => { - log::error!("Invalid translate: fail to decode key"); - continue - }, - }; + let previous_key = previous_key.unwrap_or_else(|| prefix.clone()); - match f(key, value) { - Some(new) => unhashed::put::(&previous_key, &new), - None => unhashed::kill(&previous_key), - } + let current_key = + sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix))?; + + let value = match 
unhashed::get::(¤t_key) { + Some(value) => value, + None => { + log::error!("Invalid translate: fail to decode old value"); + return Some(current_key) + }, + }; + + let mut key_material = G::Hasher::reverse(¤t_key[prefix.len()..]); + let key = match K::decode(&mut key_material) { + Ok(key) => key, + Err(_) => { + log::error!("Invalid translate: fail to decode key"); + return Some(current_key) + }, + }; + + match f(key, value) { + Some(new) => unhashed::put::(¤t_key, &new), + None => unhashed::kill(¤t_key), } + + Some(current_key) } } @@ -327,7 +341,7 @@ impl> storage::StorageMap fn migrate_key>(key: KeyArg) -> Option { let old_key = { let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); - let key_hashed = key.borrow().using_encoded(OldHasher::hash); + let key_hashed = key.using_encoded(OldHasher::hash); let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.as_ref().len()); diff --git a/frame/support/src/storage/generator/mod.rs b/frame/support/src/storage/generator/mod.rs index 568d400129689..bac9f642e37d6 100644 --- a/frame/support/src/storage/generator/mod.rs +++ b/frame/support/src/storage/generator/mod.rs @@ -53,12 +53,12 @@ mod tests { use crate::pallet_prelude::*; #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::config] #[pallet::disable_frame_system_supertrait_check] pub trait Config: 'static { - type BlockNumber; + type Block: sp_runtime::traits::Block; type AccountId; type BaseCallFilter: crate::traits::Contains; type RuntimeOrigin; @@ -102,6 +102,11 @@ mod tests { pub mod pallet_prelude { pub type OriginFor = ::RuntimeOrigin; + + pub type HeaderFor = + <::Block as sp_runtime::traits::HeaderProvider>::HeaderT; + + pub type BlockNumberFor = as sp_runtime::traits::Header>::Number; } } @@ -113,18 +118,14 @@ mod tests { crate::construct_runtime!( pub enum Runtime - where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, { System: 
self::frame_system, } ); impl self::frame_system::Config for Runtime { - type BlockNumber = BlockNumber; type AccountId = AccountId; + type Block = Block; type BaseCallFilter = crate::traits::Everything; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; @@ -148,7 +149,7 @@ mod tests { #[test] fn value_translate_works() { - let t = GenesisConfig::default().build_storage().unwrap(); + let t = RuntimeGenesisConfig::default().build_storage().unwrap(); TestExternalities::new(t).execute_with(|| { type Value = self::frame_system::Value; @@ -170,7 +171,7 @@ mod tests { #[test] fn map_translate_works() { - let t = GenesisConfig::default().build_storage().unwrap(); + let t = RuntimeGenesisConfig::default().build_storage().unwrap(); TestExternalities::new(t).execute_with(|| { type NumberMap = self::frame_system::NumberMap; @@ -201,7 +202,7 @@ mod tests { #[test] fn try_mutate_works() { - let t = GenesisConfig::default().build_storage().unwrap(); + let t = RuntimeGenesisConfig::default().build_storage().unwrap(); TestExternalities::new(t).execute_with(|| { type Value = self::frame_system::Value; type NumberMap = self::frame_system::NumberMap; diff --git a/frame/support/src/storage/migration.rs b/frame/support/src/storage/migration.rs index 8e945afdb6441..568c475bdc69d 100644 --- a/frame/support/src/storage/migration.rs +++ b/frame/support/src/storage/migration.rs @@ -37,18 +37,16 @@ pub struct StorageIterator { impl StorageIterator { /// Construct iterator to iterate over map items in `module` for the map called `item`. - #[deprecated( - note = "Please use the storage_iter or storage_iter_with_suffix functions instead" - )] + #[deprecated(note = "Will be removed after July 2023; Please use the storage_iter or \ + storage_iter_with_suffix functions instead")] pub fn new(module: &[u8], item: &[u8]) -> Self { #[allow(deprecated)] Self::with_suffix(module, item, &[][..]) } /// Construct iterator to iterate over map items in `module` for the map called `item`. 
- #[deprecated( - note = "Please use the storage_iter or storage_iter_with_suffix functions instead" - )] + #[deprecated(note = "Will be removed after July 2023; Please use the storage_iter or \ + storage_iter_with_suffix functions instead")] pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { let mut prefix = Vec::new(); let storage_prefix = storage_prefix(module, item); @@ -102,18 +100,16 @@ pub struct StorageKeyIterator { impl StorageKeyIterator { /// Construct iterator to iterate over map items in `module` for the map called `item`. - #[deprecated( - note = "Please use the storage_key_iter or storage_key_iter_with_suffix functions instead" - )] + #[deprecated(note = "Will be removed after July 2023; Please use the storage_key_iter or \ + storage_key_iter_with_suffix functions instead")] pub fn new(module: &[u8], item: &[u8]) -> Self { #[allow(deprecated)] Self::with_suffix(module, item, &[][..]) } /// Construct iterator to iterate over map items in `module` for the map called `item`. - #[deprecated( - note = "Please use the storage_key_iter or storage_key_iter_with_suffix functions instead" - )] + #[deprecated(note = "Will be removed after July 2023; Please use the storage_key_iter or \ + storage_key_iter_with_suffix functions instead")] pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { let mut prefix = Vec::new(); let storage_prefix = storage_prefix(module, item); diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 4c6ea943c6920..36e2c47383d19 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -160,6 +160,75 @@ pub trait StorageValue { } } +/// A non-continuous container type. +pub trait StorageList { + /// Iterator for normal and draining iteration. + type Iterator: Iterator; + + /// Append iterator for fast append operations. + type Appender: StorageAppender; + + /// List the elements in append order. 
+ fn iter() -> Self::Iterator; + + /// Drain the elements in append order. + /// + /// Note that this drains a value as soon as it is being inspected. For example `take_while(|_| + /// false)` still drains the first element. This also applies to `peek()`. + fn drain() -> Self::Iterator; + + /// A fast append iterator. + fn appender() -> Self::Appender; + + /// Append a single element. + /// + /// Should not be called repeatedly; use `append_many` instead. + /// Worst case linear `O(len)` with `len` being the number if elements in the list. + fn append_one(item: EncodeLikeValue) + where + EncodeLikeValue: EncodeLike, + { + Self::append_many(core::iter::once(item)); + } + + /// Append many elements. + /// + /// Should not be called repeatedly; use `appender` instead. + /// Worst case linear `O(len + items.count())` with `len` beings the number if elements in the + /// list. + fn append_many(items: I) + where + EncodeLikeValue: EncodeLike, + I: IntoIterator, + { + let mut ap = Self::appender(); + ap.append_many(items); + } +} + +/// Append iterator to append values to a storage struct. +/// +/// Can be used in situations where appending does not have constant time complexity. +pub trait StorageAppender { + /// Append a single item in constant time `O(1)`. + fn append(&mut self, item: EncodeLikeValue) + where + EncodeLikeValue: EncodeLike; + + /// Append many items in linear time `O(items.count())`. + // Note: a default impl is provided since `Self` is already assumed to be optimal for single + // append operations. + fn append_many(&mut self, items: I) + where + EncodeLikeValue: EncodeLike, + I: IntoIterator, + { + for item in items.into_iter() { + self.append(item); + } + } +} + /// A strongly-typed map in storage. /// /// Details on implementation can be found at [`generator::StorageMap`]. @@ -202,6 +271,18 @@ pub trait StorageMap { f: F, ) -> Result; + /// Mutate the value under a key if the value already exists. 
Do nothing and return the default + /// value if not. + fn mutate_extant, R: Default, F: FnOnce(&mut V) -> R>( + key: KeyArg, + f: F, + ) -> R { + Self::mutate_exists(key, |maybe_v| match maybe_v { + Some(ref mut value) => f(value), + None => R::default(), + }) + } + /// Mutate the value under a key. /// /// Deletes the item if mutated to a `None`. @@ -303,6 +384,15 @@ pub trait IterableStorageMap: StorageMap { /// /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. fn translate Option>(f: F); + + /// Translate the next entry following `previous_key` by a function `f`. + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// Returns the next key to iterate from in lexicographical order of the encoded key. + fn translate_next Option>( + previous_key: Option>, + f: F, + ) -> Option>; } /// A strongly-typed double map in storage whose secondary keys and values can be iterated over. @@ -1172,6 +1262,20 @@ impl Iterator for ChildTriePrefixIterator { } } +/// Trait for storage types that store all its value after a unique prefix. +pub trait StoragePrefixedContainer { + /// Module prefix. Used for generating final key. + fn module_prefix() -> &'static [u8]; + + /// Storage prefix. Used for generating final key. + fn storage_prefix() -> &'static [u8]; + + /// Final full prefix that prefixes all keys. + fn final_prefix() -> [u8; 32] { + crate::storage::storage_prefix(Self::module_prefix(), Self::storage_prefix()) + } +} + /// Trait for maps that store all its value after a unique prefix. /// /// By default the final prefix is: @@ -1180,7 +1284,7 @@ impl Iterator for ChildTriePrefixIterator { /// ``` pub trait StoragePrefixedMap { /// Module prefix. Used for generating final key. - fn module_prefix() -> &'static [u8]; + fn module_prefix() -> &'static [u8]; // TODO move to StoragePrefixedContainer /// Storage prefix. Used for generating final key. 
fn storage_prefix() -> &'static [u8]; @@ -1324,6 +1428,7 @@ mod private { impl Sealed for bounded_btree_map::BoundedBTreeMap {} impl Sealed for bounded_btree_set::BoundedBTreeSet {} impl Sealed for BTreeSet {} + impl<'a, T: EncodeLike, U: Encode> Sealed for codec::Ref<'a, T, U> {} macro_rules! impl_sealed_for_tuple { ($($elem:ident),+) => { diff --git a/frame/support/src/storage/stream_iter.rs b/frame/support/src/storage/stream_iter.rs index e784ebd14c52a..2205601938b88 100644 --- a/frame/support/src/storage/stream_iter.rs +++ b/frame/support/src/storage/stream_iter.rs @@ -134,7 +134,7 @@ impl ScaleContainerStreamIter { /// /// - `key`: Storage key of the container in the state. /// - /// Same as [`Self::try_new`], but logs a potential error and sets the length to `0`. + /// Same as [`Self::new_try`], but logs a potential error and sets the length to `0`. pub fn new(key: Vec) -> Self { let mut input = StorageInput::new(key); let length = if input.exists() { diff --git a/frame/support/src/storage/types/counted_map.rs b/frame/support/src/storage/types/counted_map.rs index e57942cbe0667..081f99fa16b0b 100644 --- a/frame/support/src/storage/types/counted_map.rs +++ b/frame/support/src/storage/types/counted_map.rs @@ -612,8 +612,9 @@ mod test { assert_eq!(A::count(), 2); // Insert an existing key, shouldn't increment counted values. - A::insert(3, 11); + A::insert(3, 12); + assert_eq!(A::try_get(3), Ok(12)); assert_eq!(A::count(), 2); // Remove non-existing. @@ -706,17 +707,17 @@ mod test { // Try succeed mutate existing to existing. A::try_mutate_exists(1, |query| { assert_eq!(*query, Some(43)); - *query = Some(43); + *query = Some(45); Result::<(), ()>::Ok(()) }) .unwrap(); - assert_eq!(A::try_get(1), Ok(43)); + assert_eq!(A::try_get(1), Ok(45)); assert_eq!(A::count(), 4); // Try succeed mutate existing to non-existing. 
A::try_mutate_exists(1, |query| { - assert_eq!(*query, Some(43)); + assert_eq!(*query, Some(45)); *query = None; Result::<(), ()>::Ok(()) }) diff --git a/frame/support/src/storage/types/counted_nmap.rs b/frame/support/src/storage/types/counted_nmap.rs new file mode 100644 index 0000000000000..43a243cc5d681 --- /dev/null +++ b/frame/support/src/storage/types/counted_nmap.rs @@ -0,0 +1,1427 @@ +// This file is part of Substrate. + +// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Counted storage n-map type. + +use crate::{ + storage::{ + types::{ + EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, OptionQuery, QueryKindTrait, + StorageEntryMetadataBuilder, StorageNMap, StorageValue, TupleToEncodedIter, ValueQuery, + }, + KeyGenerator, PrefixIterator, StorageAppend, StorageDecodeLength, + }, + traits::{Get, GetDefault, StorageInfo, StorageInstance}, + Never, +}; +use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen, Ref}; +use sp_api::metadata_ir::StorageEntryMetadataIR; +use sp_runtime::traits::Saturating; +use sp_std::prelude::*; + +/// A wrapper around a `StorageNMap` and a `StorageValue` to keep track of how many items +/// are in a map, without needing to iterate over all of the values. +/// +/// This storage item has some additional storage read and write overhead when manipulating values +/// compared to a regular storage map. 
+/// +/// For functions where we only add or remove a value, a single storage read is needed to check if +/// that value already exists. For mutate functions, two storage reads are used to check if the +/// value existed before and after the mutation. +/// +/// Whenever the counter needs to be updated, an additional read and write occurs to update that +/// counter. +pub struct CountedStorageNMap< + Prefix, + Key, + Value, + QueryKind = OptionQuery, + OnEmpty = GetDefault, + MaxValues = GetDefault, +>(core::marker::PhantomData<(Prefix, Key, Value, QueryKind, OnEmpty, MaxValues)>); + +/// The requirement for an instance of [`CountedStorageNMap`]. +pub trait CountedStorageNMapInstance: StorageInstance { + /// The prefix to use for the counter storage value. + type CounterPrefix: StorageInstance; +} + +// Private helper trait to access map from counted storage n-map +trait MapWrapper { + type Map; +} + +impl MapWrapper + for CountedStorageNMap +{ + type Map = StorageNMap; +} + +type CounterFor

= + StorageValue<

::CounterPrefix, u32, ValueQuery>; + +/// On removal logic for updating counter while draining upon some prefix with +/// [`crate::storage::PrefixIterator`]. +pub struct OnRemovalCounterUpdate(core::marker::PhantomData); + +impl crate::storage::PrefixIteratorOnRemoval + for OnRemovalCounterUpdate +{ + fn on_removal(_key: &[u8], _value: &[u8]) { + CounterFor::::mutate(|value| value.saturating_dec()); + } +} + +impl + CountedStorageNMap +where + Prefix: CountedStorageNMapInstance, + Key: super::key::KeyGenerator, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + /// The key used to store the counter of the map. + pub fn counter_storage_final_key() -> [u8; 32] { + CounterFor::::hashed_key() + } + + /// The prefix used to generate the key of the map. + pub fn map_storage_final_prefix() -> Vec { + use crate::storage::generator::StorageNMap; + ::Map::prefix_hash() + } + + /// Get the storage key used to fetch a value corresponding to a specific key. + pub fn hashed_key_for + TupleToEncodedIter>( + key: KArg, + ) -> Vec { + ::Map::hashed_key_for(key) + } + + /// Does the value (explicitly) exist in storage? + pub fn contains_key + TupleToEncodedIter>(key: KArg) -> bool { + ::Map::contains_key(key) + } + + /// Load the value associated with the given key from the map. + pub fn get + TupleToEncodedIter>( + key: KArg, + ) -> QueryKind::Query { + ::Map::get(key) + } + + /// Try to get the value for the given key from the map. + /// + /// Returns `Ok` if it exists, `Err` if not. + pub fn try_get + TupleToEncodedIter>( + key: KArg, + ) -> Result { + ::Map::try_get(key) + } + + /// Store or remove the value to be associated with `key` so that `get` returns the `query`. + /// It decrements the counter when the value is removed. 
+ pub fn set + TupleToEncodedIter>( + key: KArg, + query: QueryKind::Query, + ) { + let option = QueryKind::from_query_to_optional_value(query); + if option.is_none() { + CounterFor::::mutate(|value| value.saturating_dec()); + } + ::Map::set(key, QueryKind::from_optional_value_to_query(option)) + } + + /// Take a value from storage, removing it afterwards. + pub fn take + TupleToEncodedIter>( + key: KArg, + ) -> QueryKind::Query { + let removed_value = + ::Map::mutate_exists(key, |value| core::mem::replace(value, None)); + if removed_value.is_some() { + CounterFor::::mutate(|value| value.saturating_dec()); + } + QueryKind::from_optional_value_to_query(removed_value) + } + + /// Swap the values of two key-pairs. + pub fn swap(key1: KArg1, key2: KArg2) + where + KOther: KeyGenerator, + KArg1: EncodeLikeTuple + TupleToEncodedIter, + KArg2: EncodeLikeTuple + TupleToEncodedIter, + { + ::Map::swap::(key1, key2) + } + + /// Store a value to be associated with the given keys from the map. + pub fn insert(key: KArg, val: VArg) + where + KArg: EncodeLikeTuple + EncodeLike + TupleToEncodedIter, + VArg: EncodeLike, + { + if !::Map::contains_key(Ref::from(&key)) { + CounterFor::::mutate(|value| value.saturating_inc()); + } + ::Map::insert(key, val) + } + + /// Remove the value under the given keys. + pub fn remove + EncodeLike + TupleToEncodedIter>( + key: KArg, + ) { + if ::Map::contains_key(Ref::from(&key)) { + CounterFor::::mutate(|value| value.saturating_dec()); + } + ::Map::remove(key) + } + + /// Attempt to remove items from the map matching a `partial_key` prefix. + /// + /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once + /// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. + /// + /// NOTE: After the initial call for any given map, it is important that no further items + /// are inserted into the map which match the `partial key`. 
If so, then the map may not be + /// empty when the resultant `maybe_cursor` is `None`. + /// + /// # Limit + /// + /// A `limit` must be provided in order to cap the maximum + /// amount of deletions done in a single call. This is one fewer than the + /// maximum number of backend iterations which may be done by this operation and as such + /// represents the maximum number of backend deletions which may happen. A `limit` of zero + /// implies that no keys will be deleted, though there may be a single iteration done. + /// + /// # Cursor + /// + /// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be + /// passed once (in the initial call) for any given storage map and `partial_key`. Subsequent + /// calls operating on the same map/`partial_key` should always pass `Some`, and this should be + /// equal to the previous call result's `maybe_cursor` field. + pub fn clear_prefix( + partial_key: KP, + limit: u32, + maybe_cursor: Option<&[u8]>, + ) -> sp_io::MultiRemovalResults + where + Key: HasKeyPrefix, + { + let result = ::Map::clear_prefix(partial_key, limit, maybe_cursor); + match result.maybe_cursor { + None => CounterFor::::kill(), + Some(_) => CounterFor::::mutate(|x| x.saturating_reduce(result.unique)), + } + result + } + + /// Iterate over values that share the first key. + pub fn iter_prefix_values(partial_key: KP) -> PrefixIterator + where + Key: HasKeyPrefix, + { + ::Map::iter_prefix_values(partial_key) + } + + /// Mutate the value under the given keys. + pub fn mutate(key: KArg, f: F) -> R + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + F: FnOnce(&mut QueryKind::Query) -> R, + { + Self::try_mutate(key, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") + } + + /// Mutate the value under the given keys when the closure returns `Ok`. 
+ pub fn try_mutate(key: KArg, f: F) -> Result + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + F: FnOnce(&mut QueryKind::Query) -> Result, + { + Self::try_mutate_exists(key, |option_value_ref| { + let option_value = core::mem::replace(option_value_ref, None); + let mut query = QueryKind::from_optional_value_to_query(option_value); + let res = f(&mut query); + let option_value = QueryKind::from_query_to_optional_value(query); + let _ = core::mem::replace(option_value_ref, option_value); + res + }) + } + + /// Mutate the value under the given keys. Deletes the item if mutated to a `None`. + pub fn mutate_exists(key: KArg, f: F) -> R + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + F: FnOnce(&mut Option) -> R, + { + Self::try_mutate_exists(key, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") + } + + /// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`. + /// `f` will always be called with an option representing if the storage item exists (`Some`) + /// or if the storage item does not exist (`None`), independent of the `QueryType`. + pub fn try_mutate_exists(key: KArg, f: F) -> Result + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + F: FnOnce(&mut Option) -> Result, + { + ::Map::try_mutate_exists(key, |option_value| { + let existed = option_value.is_some(); + let res = f(option_value); + let exist = option_value.is_some(); + + if res.is_ok() { + if existed && !exist { + // Value was deleted + CounterFor::::mutate(|value| value.saturating_dec()); + } else if !existed && exist { + // Value was added + CounterFor::::mutate(|value| value.saturating_inc()); + } + } + res + }) + } + + /// Append the given item to the value in the storage. + /// + /// `Value` is required to implement [`StorageAppend`]. + /// + /// # Warning + /// + /// If the storage item is not encoded properly, the storage will be overwritten + /// and set to `[item]`. 
Any default value set for the storage item will be ignored + /// on overwrite. + pub fn append(key: KArg, item: EncodeLikeItem) + where + KArg: EncodeLikeTuple + EncodeLike + TupleToEncodedIter, + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageAppend, + { + if !::Map::contains_key(Ref::from(&key)) { + CounterFor::::mutate(|value| value.saturating_inc()); + } + ::Map::append(key, item) + } + + /// Read the length of the storage value without decoding the entire value under the + /// given `key1` and `key2`. + /// + /// `Value` is required to implement [`StorageDecodeLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + pub fn decode_len + TupleToEncodedIter>( + key: KArg, + ) -> Option + where + Value: StorageDecodeLength, + { + ::Map::decode_len(key) + } + + /// Migrate an item with the given `key` from defunct `hash_fns` to the current hashers. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. + pub fn migrate_keys(key: KArg, hash_fns: Key::HArg) -> Option + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + { + ::Map::migrate_keys::<_>(key, hash_fns) + } + + /// Attempt to remove all items from the map. + /// + /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once + /// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. + /// + /// NOTE: After the initial call for any given map, it is important that no further items + /// are inserted into the map. If so, then the map may not be empty when the resultant + /// `maybe_cursor` is `None`. 
+ /// + /// # Limit + /// + /// A `limit` must always be provided through in order to cap the maximum + /// amount of deletions done in a single call. This is one fewer than the + /// maximum number of backend iterations which may be done by this operation and as such + /// represents the maximum number of backend deletions which may happen. A `limit` of zero + /// implies that no keys will be deleted, though there may be a single iteration done. + /// + /// # Cursor + /// + /// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be + /// passed once (in the initial call) for any given storage map. Subsequent calls + /// operating on the same map should always pass `Some`, and this should be equal to the + /// previous call result's `maybe_cursor` field. + pub fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> sp_io::MultiRemovalResults { + let result = ::Map::clear(limit, maybe_cursor); + match result.maybe_cursor { + None => CounterFor::::kill(), + Some(_) => CounterFor::::mutate(|x| x.saturating_reduce(result.unique)), + } + result + } + + /// Iter over all value of the storage. + /// + /// NOTE: If a value failed to decode because storage is corrupted then it is skipped. + pub fn iter_values() -> crate::storage::PrefixIterator { + ::Map::iter_values() + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. + /// + /// # Warning + /// + /// This function must be used with care, before being updated the storage still contains the + /// old type, thus other calls (such as `get`) will fail at decoding it. + /// + /// # Usage + /// + /// This would typically be called inside the module implementation of on_runtime_upgrade. 
+ pub fn translate_values Option>(mut f: F) { + ::Map::translate_values(|old_value| { + let res = f(old_value); + if res.is_none() { + CounterFor::::mutate(|value| value.saturating_dec()); + } + res + }) + } + + /// Initialize the counter with the actual number of items in the map. + /// + /// This function iterates through all the items in the map and sets the counter. This operation + /// can be very heavy, so use with caution. + /// + /// Returns the number of items in the map which is used to set the counter. + pub fn initialize_counter() -> u32 { + let count = Self::iter_values().count() as u32; + CounterFor::::set(count); + count + } + + /// Return the count. + pub fn count() -> u32 { + CounterFor::::get() + } +} + +impl + CountedStorageNMap +where + Prefix: CountedStorageNMapInstance, + Key: super::key::ReversibleKeyGenerator, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + /// Enumerate all elements in the map with prefix key `kp` in no particular order. + /// + /// If you add or remove values whose prefix key is `kp` to the map while doing this, you'll get + /// undefined results. + pub fn iter_prefix( + kp: KP, + ) -> crate::storage::PrefixIterator<(>::Suffix, Value)> + where + Key: HasReversibleKeyPrefix, + { + ::Map::iter_prefix(kp) + } + + /// Enumerate all elements in the map with prefix key `kp` after a specified `starting_raw_key` + /// in no particular order. + /// + /// If you add or remove values whose prefix key is `kp` to the map while doing this, you'll get + /// undefined results. + pub fn iter_prefix_from( + kp: KP, + starting_raw_key: Vec, + ) -> crate::storage::PrefixIterator< + (>::Suffix, Value), + OnRemovalCounterUpdate, + > + where + Key: HasReversibleKeyPrefix, + { + ::Map::iter_prefix_from(kp, starting_raw_key).convert_on_removal() + } + + /// Enumerate all suffix keys in the map with prefix key `kp` in no particular order. 
+ /// + /// If you add or remove values whose prefix key is `kp` to the map while doing this, you'll get + /// undefined results. + pub fn iter_key_prefix( + kp: KP, + ) -> crate::storage::KeyPrefixIterator<>::Suffix> + where + Key: HasReversibleKeyPrefix, + { + ::Map::iter_key_prefix(kp) + } + + /// Enumerate all suffix keys in the map with prefix key `kp` after a specified + /// `starting_raw_key` in no particular order. + /// + /// If you add or remove values whose prefix key is `kp` to the map while doing this, you'll get + /// undefined results. + pub fn iter_key_prefix_from( + kp: KP, + starting_raw_key: Vec, + ) -> crate::storage::KeyPrefixIterator<>::Suffix> + where + Key: HasReversibleKeyPrefix, + { + ::Map::iter_key_prefix_from(kp, starting_raw_key) + } + + /// Remove all elements from the map with prefix key `kp` and iterate through them in no + /// particular order. + /// + /// If you add elements with prefix key `k1` to the map while doing this, you'll get undefined + /// results. + pub fn drain_prefix( + kp: KP, + ) -> crate::storage::PrefixIterator< + (>::Suffix, Value), + OnRemovalCounterUpdate, + > + where + Key: HasReversibleKeyPrefix, + { + ::Map::drain_prefix(kp).convert_on_removal() + } + + /// Enumerate all elements in the map in no particular order. + /// + /// If you add or remove values to the map while doing this, you'll get undefined results. + pub fn iter( + ) -> crate::storage::PrefixIterator<(Key::Key, Value), OnRemovalCounterUpdate> { + ::Map::iter().convert_on_removal() + } + + /// Enumerate all elements in the map after a specified `starting_key` in no particular order. + /// + /// If you add or remove values to the map while doing this, you'll get undefined results. + pub fn iter_from( + starting_raw_key: Vec, + ) -> crate::storage::PrefixIterator<(Key::Key, Value), OnRemovalCounterUpdate> { + ::Map::iter_from(starting_raw_key).convert_on_removal() + } + + /// Enumerate all keys in the map in no particular order. 
+ /// + /// If you add or remove values to the map while doing this, you'll get undefined results. + pub fn iter_keys() -> crate::storage::KeyPrefixIterator { + ::Map::iter_keys() + } + + /// Enumerate all keys in the map after a specified `starting_raw_key` in no particular order. + /// + /// If you add or remove values to the map while doing this, you'll get undefined results. + pub fn iter_keys_from( + starting_raw_key: Vec, + ) -> crate::storage::KeyPrefixIterator { + ::Map::iter_keys_from(starting_raw_key) + } + + /// Remove all elements from the map and iterate through them in no particular order. + /// + /// If you add elements to the map while doing this, you'll get undefined results. + pub fn drain( + ) -> crate::storage::PrefixIterator<(Key::Key, Value), OnRemovalCounterUpdate> { + ::Map::drain().convert_on_removal() + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value can't be decoded because the storage is corrupted, then it is skipped. 
+ pub fn translate Option>(mut f: F) { + ::Map::translate(|key, old_value| { + let res = f(key, old_value); + if res.is_none() { + CounterFor::::mutate(|value| value.saturating_dec()); + } + res + }) + } +} + +impl StorageEntryMetadataBuilder + for CountedStorageNMap +where + Prefix: CountedStorageNMapInstance, + Key: super::key::KeyGenerator, + Value: FullCodec + scale_info::StaticTypeInfo, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + ::Map::build_metadata(docs, entries); + CounterFor::::build_metadata( + vec![&"Counter for the related counted storage map"], + entries, + ); + } +} + +impl crate::traits::StorageInfoTrait + for CountedStorageNMap +where + Prefix: CountedStorageNMapInstance, + Key: super::key::KeyGenerator + super::key::KeyGeneratorMaxEncodedLen, + Value: FullCodec + MaxEncodedLen, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn storage_info() -> Vec { + [::Map::storage_info(), CounterFor::::storage_info()].concat() + } +} + +/// It doesn't require to implement `MaxEncodedLen` and give no information for `max_size`. 
+impl crate::traits::PartialStorageInfoTrait + for CountedStorageNMap +where + Prefix: CountedStorageNMapInstance, + Key: super::key::KeyGenerator, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn partial_storage_info() -> Vec { + [ + ::Map::partial_storage_info(), + CounterFor::::partial_storage_info(), + ] + .concat() + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + hash::{StorageHasher as _, *}, + storage::types::{Key as NMapKey, ValueQuery}, + }; + use sp_api::metadata_ir::{StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR}; + use sp_io::{hashing::twox_128, TestExternalities}; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "Foo"; + } + impl CountedStorageNMapInstance for Prefix { + type CounterPrefix = Prefix; + } + + struct ADefault; + impl crate::traits::Get for ADefault { + fn get() -> u32 { + 98 + } + } + + #[test] + fn test_1_key() { + type A = CountedStorageNMap, u32, OptionQuery>; + type AValueQueryWithAnOnEmpty = + CountedStorageNMap, u32, ValueQuery, ADefault>; + type B = CountedStorageNMap, u32, ValueQuery>; + type C = CountedStorageNMap, u8, ValueQuery>; + type WithLen = CountedStorageNMap, Vec>; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"Foo")); + k.extend(&3u16.blake2_128_concat()); + assert_eq!(A::hashed_key_for((&3,)).to_vec(), k); + + assert_eq!(A::contains_key((3,)), false); + assert_eq!(A::get((3,)), None); + assert_eq!(AValueQueryWithAnOnEmpty::get((3,)), 98); + assert_eq!(A::count(), 0); + + A::insert((3,), 10); + assert_eq!(A::contains_key((3,)), true); + assert_eq!(A::get((3,)), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get((3,)), 10); + assert_eq!(A::count(), 1); + + A::swap::, _, _>((3,), (2,)); + assert_eq!(A::contains_key((3,)), false); + 
assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((3,)), None); + assert_eq!(AValueQueryWithAnOnEmpty::get((3,)), 98); + assert_eq!(A::get((2,)), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get((2,)), 10); + assert_eq!(A::count(), 1); + + A::remove((2,)); + assert_eq!(A::contains_key((2,)), false); + assert_eq!(A::get((2,)), None); + assert_eq!(A::count(), 0); + + AValueQueryWithAnOnEmpty::mutate((2,), |v| *v = *v * 2); + AValueQueryWithAnOnEmpty::mutate((2,), |v| *v = *v * 2); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(98 * 4)); + assert_eq!(A::count(), 1); + + A::remove((2,)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate((2,), |v| { + *v = *v * 2; + Ok(()) + }); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate((2,), |v| { + *v = *v * 2; + Ok(()) + }); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(98 * 4)); + assert_eq!(A::count(), 1); + + A::remove((2,)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate((2,), |v| { + *v = *v * 2; + Err(()) + }); + assert_eq!(A::contains_key((2,)), false); + assert_eq!(A::count(), 0); + + A::remove((2,)); + AValueQueryWithAnOnEmpty::mutate_exists((2,), |v| { + assert!(v.is_none()); + *v = Some(10); + }); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(10)); + AValueQueryWithAnOnEmpty::mutate_exists((2,), |v| { + *v = Some(v.unwrap() * 10); + }); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(100)); + assert_eq!(A::count(), 1); + + A::remove((2,)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists((2,), |v| { + assert!(v.is_none()); + *v = Some(10); + Ok(()) + }); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(10)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists((2,), |v| { + *v = Some(v.unwrap() * 10); + Ok(()) + }); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), 
Some(100)); + assert_eq!(A::try_get((2,)), Ok(100)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists((2,), |v| { + *v = Some(v.unwrap() * 10); + Err(()) + }); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(100)); + assert_eq!(A::count(), 1); + + A::insert((2,), 10); + assert_eq!(A::take((2,)), Some(10)); + assert_eq!(A::contains_key((2,)), false); + assert_eq!(AValueQueryWithAnOnEmpty::take((2,)), 98); + assert_eq!(A::contains_key((2,)), false); + assert_eq!(A::try_get((2,)), Err(())); + assert_eq!(A::count(), 0); + + B::insert((2,), 10); + assert_eq!( + A::migrate_keys((2,), (Box::new(|key| Blake2_256::hash(key).to_vec()),),), + Some(10) + ); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(10)); + assert_eq!(A::count(), 1); + + A::insert((3,), 10); + A::insert((4,), 10); + assert_eq!(A::count(), 3); + let _ = A::clear(u32::max_value(), None); + assert!(!A::contains_key((2,)) && !A::contains_key((3,)) && !A::contains_key((4,))); + assert_eq!(A::count(), 0); + + A::insert((3,), 10); + A::insert((4,), 10); + assert_eq!(A::iter_values().collect::>(), vec![10, 10]); + assert_eq!(A::count(), 2); + + C::insert((3,), 10); + C::insert((4,), 10); + A::translate_values::(|v| Some((v * 2).into())); + assert_eq!(A::iter().collect::>(), vec![(4, 20), (3, 20)]); + assert_eq!(A::count(), 2); + + A::insert((3,), 10); + A::insert((4,), 10); + assert_eq!(A::iter().collect::>(), vec![(4, 10), (3, 10)]); + assert_eq!(A::drain().collect::>(), vec![(4, 10), (3, 10)]); + assert_eq!(A::iter().collect::>(), vec![]); + assert_eq!(A::count(), 0); + + C::insert((3,), 10); + C::insert((4,), 10); + A::translate::(|k1, v| Some((k1 as u16 * v as u16).into())); + assert_eq!(A::iter().collect::>(), vec![(4, 40), (3, 30)]); + assert_eq!(A::count(), 2); + + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + 
StorageEntryMetadataIR { + name: "Foo", + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadataIR { + name: "Foo", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), + default: vec![0, 0, 0, 0], + docs: if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + vec!["Counter for the related counted storage map"] + }, + }, + StorageEntryMetadataIR { + name: "Foo", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: 98u32.encode(), + docs: vec![], + }, + StorageEntryMetadataIR { + name: "Foo", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), + default: vec![0, 0, 0, 0], + docs: if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + vec!["Counter for the related counted storage map"] + }, + }, + ] + ); + + let _ = WithLen::clear(u32::max_value(), None); + assert_eq!(WithLen::decode_len((3,)), None); + WithLen::append((0,), 10); + assert_eq!(WithLen::decode_len((0,)), Some(1)); + }); + } + + #[test] + fn test_2_keys() { + type A = CountedStorageNMap< + Prefix, + (NMapKey, NMapKey), + u32, + OptionQuery, + >; + type AValueQueryWithAnOnEmpty = CountedStorageNMap< + Prefix, + (NMapKey, NMapKey), + u32, + ValueQuery, + ADefault, + >; + type B = CountedStorageNMap< + Prefix, + (NMapKey, NMapKey), + u32, + ValueQuery, + >; + type C = CountedStorageNMap< + Prefix, + (NMapKey, NMapKey), + u8, + ValueQuery, + >; + type WithLen = CountedStorageNMap< + Prefix, + (NMapKey, NMapKey), + Vec, + >; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + 
k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"Foo")); + k.extend(&3u16.blake2_128_concat()); + k.extend(&30u8.twox_64_concat()); + assert_eq!(A::hashed_key_for((3, 30)).to_vec(), k); + + assert_eq!(A::contains_key((3, 30)), false); + assert_eq!(A::get((3, 30)), None); + assert_eq!(AValueQueryWithAnOnEmpty::get((3, 30)), 98); + assert_eq!(A::count(), 0); + + A::insert((3, 30), 10); + assert_eq!(A::contains_key((3, 30)), true); + assert_eq!(A::get((3, 30)), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get((3, 30)), 10); + assert_eq!(A::count(), 1); + + A::swap::<(NMapKey, NMapKey), _, _>( + (3, 30), + (2, 20), + ); + assert_eq!(A::contains_key((3, 30)), false); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((3, 30)), None); + assert_eq!(AValueQueryWithAnOnEmpty::get((3, 30)), 98); + assert_eq!(A::get((2, 20)), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get((2, 20)), 10); + assert_eq!(A::count(), 1); + + A::remove((2, 20)); + assert_eq!(A::contains_key((2, 20)), false); + assert_eq!(A::get((2, 20)), None); + assert_eq!(A::count(), 0); + + AValueQueryWithAnOnEmpty::mutate((2, 20), |v| *v = *v * 2); + AValueQueryWithAnOnEmpty::mutate((2, 20), |v| *v = *v * 2); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(98 * 4)); + assert_eq!(A::count(), 1); + + A::remove((2, 20)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate((2, 20), |v| { + *v = *v * 2; + Err(()) + }); + assert_eq!(A::contains_key((2, 20)), false); + assert_eq!(A::count(), 0); + + A::remove((2, 20)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate((2, 20), |v| { + *v = *v * 2; + Err(()) + }); + assert_eq!(A::contains_key((2, 20)), false); + assert_eq!(A::count(), 0); + + A::remove((2, 20)); + AValueQueryWithAnOnEmpty::mutate_exists((2, 20), |v| { + assert!(v.is_none()); + *v = Some(10); + }); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(10)); + assert_eq!(A::count(), 1); + 
AValueQueryWithAnOnEmpty::mutate_exists((2, 20), |v| { + *v = Some(v.unwrap() * 10); + }); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(100)); + assert_eq!(A::count(), 1); + + A::remove((2, 20)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists((2, 20), |v| { + assert!(v.is_none()); + *v = Some(10); + Ok(()) + }); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(10)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists((2, 20), |v| { + *v = Some(v.unwrap() * 10); + Ok(()) + }); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(100)); + assert_eq!(A::try_get((2, 20)), Ok(100)); + assert_eq!(A::count(), 1); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists((2, 20), |v| { + *v = Some(v.unwrap() * 10); + Err(()) + }); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(100)); + assert_eq!(A::count(), 1); + + A::insert((2, 20), 10); + assert_eq!(A::take((2, 20)), Some(10)); + assert_eq!(A::contains_key((2, 20)), false); + assert_eq!(AValueQueryWithAnOnEmpty::take((2, 20)), 98); + assert_eq!(A::contains_key((2, 20)), false); + assert_eq!(A::try_get((2, 20)), Err(())); + assert_eq!(A::count(), 0); + + B::insert((2, 20), 10); + assert_eq!( + A::migrate_keys( + (2, 20), + ( + Box::new(|key| Blake2_256::hash(key).to_vec()), + Box::new(|key| Twox128::hash(key).to_vec()), + ), + ), + Some(10) + ); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(10)); + assert_eq!(A::count(), 1); + + A::insert((3, 30), 10); + A::insert((4, 40), 10); + assert_eq!(A::count(), 3); + let _ = A::clear(u32::max_value(), None); + // one of the item has been removed + assert!( + !A::contains_key((2, 20)) && !A::contains_key((3, 30)) && !A::contains_key((4, 40)) + ); + assert_eq!(A::count(), 0); + + assert_eq!(A::count(), 0); + + A::insert((3, 30), 10); + A::insert((4, 40), 10); + 
assert_eq!(A::iter_values().collect::>(), vec![10, 10]); + assert_eq!(A::count(), 2); + + C::insert((3, 30), 10); + C::insert((4, 40), 10); + A::translate_values::(|v| Some((v * 2).into())); + assert_eq!(A::iter().collect::>(), vec![((4, 40), 20), ((3, 30), 20)]); + assert_eq!(A::count(), 2); + + A::insert((3, 30), 10); + A::insert((4, 40), 10); + assert_eq!(A::iter().collect::>(), vec![((4, 40), 10), ((3, 30), 10)]); + assert_eq!(A::drain().collect::>(), vec![((4, 40), 10), ((3, 30), 10)]); + assert_eq!(A::iter().collect::>(), vec![]); + assert_eq!(A::count(), 0); + + C::insert((3, 30), 10); + C::insert((4, 40), 10); + A::translate::(|(k1, k2), v| Some((k1 * k2 as u16 * v as u16).into())); + assert_eq!(A::iter().collect::>(), vec![((4, 40), 1600), ((3, 30), 900)]); + assert_eq!(A::count(), 2); + + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadataIR { + name: "Foo", + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![ + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u8)>(), + value: scale_info::meta_type::(), + }, + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadataIR { + name: "Foo", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), + default: vec![0, 0, 0, 0], + docs: if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + vec!["Counter for the related counted storage map"] + }, + }, + StorageEntryMetadataIR { + name: "Foo", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![ + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u8)>(), + value: scale_info::meta_type::(), + }, + default: 98u32.encode(), + docs: vec![], + }, + 
StorageEntryMetadataIR { + name: "Foo", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), + default: vec![0, 0, 0, 0], + docs: if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + vec!["Counter for the related counted storage map"] + }, + }, + ] + ); + + let _ = WithLen::clear(u32::max_value(), None); + assert_eq!(WithLen::decode_len((3, 30)), None); + WithLen::append((0, 100), 10); + assert_eq!(WithLen::decode_len((0, 100)), Some(1)); + + A::insert((3, 30), 11); + A::insert((3, 31), 12); + A::insert((4, 40), 13); + A::insert((4, 41), 14); + assert_eq!(A::iter_prefix_values((3,)).collect::>(), vec![12, 11]); + assert_eq!(A::iter_prefix_values((4,)).collect::>(), vec![13, 14]); + assert_eq!(A::count(), 5); + }); + } + + #[test] + fn test_3_keys() { + type A = CountedStorageNMap< + Prefix, + ( + NMapKey, + NMapKey, + NMapKey, + ), + u32, + OptionQuery, + >; + type AValueQueryWithAnOnEmpty = CountedStorageNMap< + Prefix, + ( + NMapKey, + NMapKey, + NMapKey, + ), + u32, + ValueQuery, + ADefault, + >; + type B = CountedStorageNMap< + Prefix, + (NMapKey, NMapKey, NMapKey), + u32, + ValueQuery, + >; + type C = CountedStorageNMap< + Prefix, + ( + NMapKey, + NMapKey, + NMapKey, + ), + u8, + ValueQuery, + >; + type WithLen = CountedStorageNMap< + Prefix, + ( + NMapKey, + NMapKey, + NMapKey, + ), + Vec, + >; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"Foo")); + k.extend(&1u16.blake2_128_concat()); + k.extend(&10u16.blake2_128_concat()); + k.extend(&100u16.twox_64_concat()); + assert_eq!(A::hashed_key_for((1, 10, 100)).to_vec(), k); + + assert_eq!(A::contains_key((1, 10, 100)), false); + assert_eq!(A::get((1, 10, 100)), None); + assert_eq!(AValueQueryWithAnOnEmpty::get((1, 10, 100)), 98); + assert_eq!(A::count(), 0); + + A::insert((1, 10, 100), 30); + assert_eq!(A::contains_key((1, 10, 100)), true); + assert_eq!(A::get((1, 10, 
100)), Some(30)); + assert_eq!(AValueQueryWithAnOnEmpty::get((1, 10, 100)), 30); + assert_eq!(A::count(), 1); + + A::swap::< + ( + NMapKey, + NMapKey, + NMapKey, + ), + _, + _, + >((1, 10, 100), (2, 20, 200)); + assert_eq!(A::contains_key((1, 10, 100)), false); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((1, 10, 100)), None); + assert_eq!(AValueQueryWithAnOnEmpty::get((1, 10, 100)), 98); + assert_eq!(A::get((2, 20, 200)), Some(30)); + assert_eq!(AValueQueryWithAnOnEmpty::get((2, 20, 200)), 30); + assert_eq!(A::count(), 1); + + A::remove((2, 20, 200)); + assert_eq!(A::contains_key((2, 20, 200)), false); + assert_eq!(A::get((2, 20, 200)), None); + assert_eq!(A::count(), 0); + + AValueQueryWithAnOnEmpty::mutate((2, 20, 200), |v| *v = *v * 2); + AValueQueryWithAnOnEmpty::mutate((2, 20, 200), |v| *v = *v * 2); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(98 * 4)); + assert_eq!(A::count(), 1); + + A::remove((2, 20, 200)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate((2, 20, 200), |v| { + *v = *v * 2; + Err(()) + }); + assert_eq!(A::contains_key((2, 20, 200)), false); + assert_eq!(A::count(), 0); + + A::remove((2, 20, 200)); + AValueQueryWithAnOnEmpty::mutate_exists((2, 20, 200), |v| { + assert!(v.is_none()); + *v = Some(10); + }); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(10)); + assert_eq!(A::count(), 1); + AValueQueryWithAnOnEmpty::mutate_exists((2, 20, 200), |v| { + *v = Some(v.unwrap() * 10); + }); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(100)); + assert_eq!(A::count(), 1); + + A::remove((2, 20, 200)); + let _: Result<(), ()> = + AValueQueryWithAnOnEmpty::try_mutate_exists((2, 20, 200), |v| { + assert!(v.is_none()); + *v = Some(10); + Ok(()) + }); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(10)); + let _: Result<(), ()> = + 
AValueQueryWithAnOnEmpty::try_mutate_exists((2, 20, 200), |v| { + *v = Some(v.unwrap() * 10); + Ok(()) + }); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(100)); + assert_eq!(A::try_get((2, 20, 200)), Ok(100)); + assert_eq!(A::count(), 1); + let _: Result<(), ()> = + AValueQueryWithAnOnEmpty::try_mutate_exists((2, 20, 200), |v| { + *v = Some(v.unwrap() * 10); + Err(()) + }); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(100)); + assert_eq!(A::count(), 1); + + A::insert((2, 20, 200), 10); + assert_eq!(A::take((2, 20, 200)), Some(10)); + assert_eq!(A::contains_key((2, 20, 200)), false); + assert_eq!(AValueQueryWithAnOnEmpty::take((2, 20, 200)), 98); + assert_eq!(A::contains_key((2, 20, 200)), false); + assert_eq!(A::try_get((2, 20, 200)), Err(())); + assert_eq!(A::count(), 0); + + B::insert((2, 20, 200), 10); + assert_eq!( + A::migrate_keys( + (2, 20, 200), + ( + Box::new(|key| Blake2_256::hash(key).to_vec()), + Box::new(|key| Blake2_256::hash(key).to_vec()), + Box::new(|key| Twox128::hash(key).to_vec()), + ), + ), + Some(10) + ); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(10)); + assert_eq!(A::count(), 1); + + A::insert((3, 30, 300), 10); + A::insert((4, 40, 400), 10); + assert_eq!(A::count(), 3); + let _ = A::clear(u32::max_value(), None); + // one of the item has been removed + assert!( + !A::contains_key((2, 20, 200)) && + !A::contains_key((3, 30, 300)) && + !A::contains_key((4, 40, 400)) + ); + assert_eq!(A::count(), 0); + + A::insert((3, 30, 300), 10); + A::insert((4, 40, 400), 10); + assert_eq!(A::iter_values().collect::>(), vec![10, 10]); + assert_eq!(A::count(), 2); + + C::insert((3, 30, 300), 10); + C::insert((4, 40, 400), 10); + A::translate_values::(|v| Some((v * 2).into())); + assert_eq!(A::iter().collect::>(), vec![((4, 40, 400), 20), ((3, 30, 300), 20)]); + assert_eq!(A::count(), 2); + + A::insert((3, 30, 300), 10); 
+ A::insert((4, 40, 400), 10); + assert_eq!(A::iter().collect::>(), vec![((4, 40, 400), 10), ((3, 30, 300), 10)]); + assert_eq!( + A::drain().collect::>(), + vec![((4, 40, 400), 10), ((3, 30, 300), 10)] + ); + assert_eq!(A::iter().collect::>(), vec![]); + assert_eq!(A::count(), 0); + + C::insert((3, 30, 300), 10); + C::insert((4, 40, 400), 10); + A::translate::(|(k1, k2, k3), v| { + Some((k1 * k2 as u16 * v as u16 / k3 as u16).into()) + }); + assert_eq!(A::iter().collect::>(), vec![((4, 40, 400), 4), ((3, 30, 300), 3)]); + assert_eq!(A::count(), 2); + + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadataIR { + name: "Foo", + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![ + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u16, u16)>(), + value: scale_info::meta_type::(), + }, + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadataIR { + name: "Foo", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), + default: vec![0, 0, 0, 0], + docs: if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + vec!["Counter for the related counted storage map"] + }, + }, + StorageEntryMetadataIR { + name: "Foo", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![ + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u16, u16)>(), + value: scale_info::meta_type::(), + }, + default: 98u32.encode(), + docs: vec![], + }, + StorageEntryMetadataIR { + name: "Foo", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Plain(scale_info::meta_type::()), + default: vec![0, 0, 0, 0], + 
docs: if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + vec!["Counter for the related counted storage map"] + }, + }, + ] + ); + + let _ = WithLen::clear(u32::max_value(), None); + assert_eq!(WithLen::decode_len((3, 30, 300)), None); + WithLen::append((0, 100, 1000), 10); + assert_eq!(WithLen::decode_len((0, 100, 1000)), Some(1)); + + A::insert((3, 30, 300), 11); + A::insert((3, 30, 301), 12); + A::insert((4, 40, 400), 13); + A::insert((4, 40, 401), 14); + assert_eq!(A::iter_prefix_values((3,)).collect::>(), vec![11, 12]); + assert_eq!(A::iter_prefix_values((4,)).collect::>(), vec![14, 13]); + assert_eq!(A::iter_prefix_values((3, 30)).collect::>(), vec![11, 12]); + assert_eq!(A::iter_prefix_values((4, 40)).collect::>(), vec![14, 13]); + assert_eq!(A::count(), 5); + }); + } +} diff --git a/frame/support/src/storage/types/key.rs b/frame/support/src/storage/types/key.rs index bf87e593b063a..ec055aba803ec 100755 --- a/frame/support/src/storage/types/key.rs +++ b/frame/support/src/storage/types/key.rs @@ -37,7 +37,7 @@ pub struct Key(core::marker::PhantomData<(Hasher, KeyType)>); /// A trait that contains the current key as an associated type. pub trait KeyGenerator { type Key: EncodeLike + StaticTypeInfo; - type KArg: Encode; + type KArg: Encode + EncodeLike; type HashFn: FnOnce(&[u8]) -> Vec; type HArg; @@ -196,6 +196,11 @@ impl_encode_like_tuples!(A, B, C, D, E, F, G, H, I, J, K, L, M, O, P); impl_encode_like_tuples!(A, B, C, D, E, F, G, H, I, J, K, L, M, O, P, Q); impl_encode_like_tuples!(A, B, C, D, E, F, G, H, I, J, K, L, M, O, P, Q, R); +impl<'a, T: EncodeLike + EncodeLikeTuple, U: Encode> EncodeLikeTuple + for codec::Ref<'a, T, U> +{ +} + /// Trait to indicate that a tuple can be converted into an iterator of a vector of encoded bytes. 
pub trait TupleToEncodedIter { fn to_encoded_iter(&self) -> sp_std::vec::IntoIter>; @@ -215,6 +220,15 @@ impl TupleToEncodedIter for &T { } } +impl<'a, T: EncodeLike + TupleToEncodedIter, U: Encode> TupleToEncodedIter + for codec::Ref<'a, T, U> +{ + fn to_encoded_iter(&self) -> sp_std::vec::IntoIter> { + use core::ops::Deref as _; + self.deref().to_encoded_iter() + } +} + /// A trait that indicates the hashers for the keys generated are all reversible. pub trait ReversibleKeyGenerator: KeyGenerator { type ReversibleHasher; diff --git a/frame/support/src/storage/types/map.rs b/frame/support/src/storage/types/map.rs index 2110732b2f69c..c919dc6745d78 100644 --- a/frame/support/src/storage/types/map.rs +++ b/frame/support/src/storage/types/map.rs @@ -186,6 +186,14 @@ where >::try_mutate(key, f) } + /// Mutate the value under a key iff it exists. Do nothing and return the default value if not. + pub fn mutate_extant, R: Default, F: FnOnce(&mut Value) -> R>( + key: KeyArg, + f: F, + ) -> R { + >::mutate_extant(key, f) + } + /// Mutate the value under a key. Deletes the item if mutated to a `None`. pub fn mutate_exists, R, F: FnOnce(&mut Option) -> R>( key: KeyArg, @@ -366,6 +374,16 @@ where >::iter_from(starting_raw_key) } + /// Enumerate all elements in the map after a specified `starting_key` in no + /// particular order. + /// + /// If you alter the map while doing this, you'll get undefined results. + pub fn iter_from_key( + starting_key: impl EncodeLike, + ) -> crate::storage::PrefixIterator<(Key, Value)> { + Self::iter_from(Self::hashed_key_for(starting_key)) + } + /// Enumerate all keys in the map in no particular order. /// /// If you alter the map while doing this, you'll get undefined results. @@ -381,6 +399,16 @@ where >::iter_keys_from(starting_raw_key) } + /// Enumerate all keys in the map after a specified `starting_key` in no particular + /// order. + /// + /// If you alter the map while doing this, you'll get undefined results. 
+ pub fn iter_keys_from_key( + starting_key: impl EncodeLike, + ) -> crate::storage::KeyPrefixIterator { + Self::iter_keys_from(Self::hashed_key_for(starting_key)) + } + /// Remove all elements from the map and iterate through them in no particular order. /// /// If you add elements to the map while doing this, you'll get undefined results. @@ -484,7 +512,7 @@ mod test { use crate::{ hash::*, metadata_ir::{StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR}, - storage::types::ValueQuery, + storage::{types::ValueQuery, IterableStorageMap}, }; use sp_io::{hashing::twox_128, TestExternalities}; @@ -700,6 +728,15 @@ mod test { A::translate::(|k, v| Some((k * v as u16).into())); assert_eq!(A::iter().collect::>(), vec![(4, 40), (3, 30)]); + let translate_next = |k: u16, v: u8| Some((v as u16 / k).into()); + let k = A::translate_next::(None, translate_next); + let k = A::translate_next::(k, translate_next); + assert_eq!(None, A::translate_next::(k, translate_next)); + assert_eq!(A::iter().collect::>(), vec![(4, 10), (3, 10)]); + + let _ = A::translate_next::(None, |_, _| None); + assert_eq!(A::iter().collect::>(), vec![(3, 10)]); + let mut entries = vec![]; A::build_metadata(vec![], &mut entries); AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); diff --git a/frame/support/src/storage/types/mod.rs b/frame/support/src/storage/types/mod.rs index 3a5bae2e608b7..99b0455fc6748 100644 --- a/frame/support/src/storage/types/mod.rs +++ b/frame/support/src/storage/types/mod.rs @@ -23,6 +23,7 @@ use codec::FullCodec; use sp_std::prelude::*; mod counted_map; +mod counted_nmap; mod double_map; mod key; mod map; @@ -30,6 +31,7 @@ mod nmap; mod value; pub use counted_map::{CountedStorageMap, CountedStorageMapInstance}; +pub use counted_nmap::{CountedStorageNMap, CountedStorageNMapInstance}; pub use double_map::StorageDoubleMap; pub use key::{ EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, Key, KeyGenerator, diff --git 
a/frame/support/src/storage/types/nmap.rs b/frame/support/src/storage/types/nmap.rs index 9b63ca7b0f417..0fac1fc933706 100755 --- a/frame/support/src/storage/types/nmap.rs +++ b/frame/support/src/storage/types/nmap.rs @@ -15,8 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Storage map type. Implements StorageDoubleMap, StorageIterableDoubleMap, -//! StoragePrefixedDoubleMap traits and their methods directly. +//! Storage n-map type. Particularly implements `StorageNMap` and `StoragePrefixedMap` +//! traits and their methods directly. use crate::{ metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}, diff --git a/frame/support/src/tests/mod.rs b/frame/support/src/tests/mod.rs new file mode 100644 index 0000000000000..cb4b4e82418b1 --- /dev/null +++ b/frame/support/src/tests/mod.rs @@ -0,0 +1,629 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::*; +use crate::metadata_ir::{ + PalletStorageMetadataIR, StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, + StorageHasherIR, +}; +use sp_io::{MultiRemovalResults, TestExternalities}; +use sp_runtime::{generic, traits::BlakeTwo256, BuildStorage}; + +pub use self::frame_system::{pallet_prelude::*, Config, Pallet}; + +mod storage_alias; + +#[pallet] +pub mod frame_system { + #[allow(unused)] + use super::{frame_system, frame_system::pallet_prelude::*}; + pub use crate::dispatch::RawOrigin; + use crate::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: 'static { + type Block: Parameter + sp_runtime::traits::Block; + type AccountId; + type BaseCallFilter: crate::traits::Contains; + type RuntimeOrigin; + type RuntimeCall; + type PalletInfo: crate::traits::PalletInfo; + type DbWeight: Get; + } + + #[pallet::error] + pub enum Error { + /// Required by construct_runtime + CallFiltered, + } + + #[pallet::origin] + pub type Origin = RawOrigin<::AccountId>; + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + pub type Data = StorageMap<_, Twox64Concat, u32, u64, ValueQuery>; + + #[pallet::storage] + pub type OptionLinkedMap = StorageMap<_, Blake2_128Concat, u32, u32, OptionQuery>; + + #[pallet::storage] + #[pallet::getter(fn generic_data)] + pub type GenericData = + StorageMap<_, Identity, BlockNumberFor, BlockNumberFor, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn generic_data2)] + pub type GenericData2 = + StorageMap<_, Blake2_128Concat, BlockNumberFor, BlockNumberFor, OptionQuery>; + + #[pallet::storage] + pub type DataDM = + StorageDoubleMap<_, Twox64Concat, u32, Blake2_128Concat, u32, u64, ValueQuery>; + + #[pallet::storage] + pub type GenericDataDM = StorageDoubleMap< + _, + Blake2_128Concat, + BlockNumberFor, + Identity, + BlockNumberFor, + BlockNumberFor, + ValueQuery, + >; + + #[pallet::storage] + pub type 
GenericData2DM = StorageDoubleMap< + _, + Blake2_128Concat, + BlockNumberFor, + Twox64Concat, + BlockNumberFor, + BlockNumberFor, + OptionQuery, + >; + + #[pallet::storage] + #[pallet::unbounded] + pub type AppendableDM = StorageDoubleMap< + _, + Blake2_128Concat, + u32, + Blake2_128Concat, + BlockNumberFor, + Vec, + ValueQuery, + >; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub data: Vec<(u32, u64)>, + pub test_config: Vec<(u32, u32, u64)>, + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, + } + + impl Default for GenesisConfig { + fn default() -> Self { + Self { + _config: Default::default(), + data: vec![(15u32, 42u64)], + test_config: vec![(15u32, 16u32, 42u64)], + } + } + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + for (k, v) in &self.data { + >::insert(k, v); + } + for (k1, k2, v) in &self.test_config { + >::insert(k1, k2, v); + } + } + } + + pub mod pallet_prelude { + pub type OriginFor = ::RuntimeOrigin; + + pub type HeaderFor = + <::Block as sp_runtime::traits::HeaderProvider>::HeaderT; + + pub type BlockNumberFor = as sp_runtime::traits::Header>::Number; + } +} + +type BlockNumber = u32; +type AccountId = u32; +type Header = generic::Header; +type UncheckedExtrinsic = generic::UncheckedExtrinsic; +type Block = generic::Block; + +crate::construct_runtime!( + pub enum Runtime + { + System: self::frame_system, + } +); + +impl Config for Runtime { + type Block = Block; + type AccountId = AccountId; + type BaseCallFilter = crate::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type PalletInfo = PalletInfo; + type DbWeight = (); +} + +fn new_test_ext() -> TestExternalities { + RuntimeGenesisConfig::default().build_storage().unwrap().into() +} + +trait Sorted { + fn sorted(self) -> Self; +} + +impl Sorted for Vec { + fn sorted(mut self) -> Self { + self.sort(); + self + } +} + +#[test] +fn map_issue_3318() { + 
new_test_ext().execute_with(|| { + type OptionLinkedMap = self::frame_system::OptionLinkedMap; + + OptionLinkedMap::insert(1, 1); + assert_eq!(OptionLinkedMap::get(1), Some(1)); + OptionLinkedMap::insert(1, 2); + assert_eq!(OptionLinkedMap::get(1), Some(2)); + }); +} + +#[test] +fn map_swap_works() { + new_test_ext().execute_with(|| { + type OptionLinkedMap = self::frame_system::OptionLinkedMap; + + OptionLinkedMap::insert(0, 0); + OptionLinkedMap::insert(1, 1); + OptionLinkedMap::insert(2, 2); + OptionLinkedMap::insert(3, 3); + + let collect = || OptionLinkedMap::iter().collect::>().sorted(); + assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); + + // Two existing + OptionLinkedMap::swap(1, 2); + assert_eq!(collect(), vec![(0, 0), (1, 2), (2, 1), (3, 3)]); + + // Back to normal + OptionLinkedMap::swap(2, 1); + assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); + + // Left existing + OptionLinkedMap::swap(2, 5); + assert_eq!(collect(), vec![(0, 0), (1, 1), (3, 3), (5, 2)]); + + // Right existing + OptionLinkedMap::swap(5, 2); + assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); + }); +} + +#[test] +fn double_map_swap_works() { + new_test_ext().execute_with(|| { + type DataDM = self::frame_system::DataDM; + + DataDM::insert(0, 1, 1); + DataDM::insert(1, 0, 2); + DataDM::insert(1, 1, 3); + + let get_all = || { + vec![ + DataDM::get(0, 1), + DataDM::get(1, 0), + DataDM::get(1, 1), + DataDM::get(2, 0), + DataDM::get(2, 1), + ] + }; + assert_eq!(get_all(), vec![1, 2, 3, 0, 0]); + + // Two existing + DataDM::swap(0, 1, 1, 0); + assert_eq!(get_all(), vec![2, 1, 3, 0, 0]); + + // Left existing + DataDM::swap(1, 0, 2, 0); + assert_eq!(get_all(), vec![2, 0, 3, 1, 0]); + + // Right existing + DataDM::swap(2, 1, 1, 1); + assert_eq!(get_all(), vec![2, 0, 0, 1, 3]); + }); +} + +#[test] +fn map_basic_insert_remove_should_work() { + new_test_ext().execute_with(|| { + type Map = self::frame_system::Data; + + // initialized during genesis + 
assert_eq!(Map::get(&15u32), 42u64); + + // get / insert / take + let key = 17u32; + assert_eq!(Map::get(&key), 0u64); + Map::insert(key, 4u64); + assert_eq!(Map::get(&key), 4u64); + assert_eq!(Map::take(&key), 4u64); + assert_eq!(Map::get(&key), 0u64); + + // mutate + Map::mutate(&key, |val| { + *val = 15; + }); + assert_eq!(Map::get(&key), 15u64); + + // remove + Map::remove(&key); + assert_eq!(Map::get(&key), 0u64); + }); +} + +#[test] +fn map_iteration_should_work() { + new_test_ext().execute_with(|| { + type Map = self::frame_system::Data; + + assert_eq!(Map::iter().collect::>().sorted(), vec![(15, 42)]); + // insert / remove + let key = 17u32; + Map::insert(key, 4u64); + assert_eq!(Map::iter().collect::>().sorted(), vec![(15, 42), (key, 4)]); + assert_eq!(Map::take(&15), 42u64); + assert_eq!(Map::take(&key), 4u64); + assert_eq!(Map::iter().collect::>().sorted(), vec![]); + + // Add couple of more elements + Map::insert(key, 42u64); + assert_eq!(Map::iter().collect::>().sorted(), vec![(key, 42)]); + Map::insert(key + 1, 43u64); + assert_eq!(Map::iter().collect::>().sorted(), vec![(key, 42), (key + 1, 43)]); + + // mutate + let key = key + 2; + Map::mutate(&key, |val| { + *val = 15; + }); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(key - 2, 42), (key - 1, 43), (key, 15)] + ); + Map::mutate(&key, |val| { + *val = 17; + }); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(key - 2, 42), (key - 1, 43), (key, 17)] + ); + + // remove first + Map::remove(&key); + assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 2, 42), (key - 1, 43)]); + + // remove last from the list + Map::remove(&(key - 2)); + assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 1, 43)]); + + // remove the last element + Map::remove(&(key - 1)); + assert_eq!(Map::iter().collect::>().sorted(), vec![]); + }); +} + +#[test] +fn double_map_basic_insert_remove_remove_prefix_with_commit_should_work() { + let key1 = 17u32; + let key2 = 18u32; + type DoubleMap = 
self::frame_system::DataDM; + let mut e = new_test_ext(); + e.execute_with(|| { + // initialized during genesis + assert_eq!(DoubleMap::get(&15u32, &16u32), 42u64); + + // get / insert / take + assert_eq!(DoubleMap::get(&key1, &key2), 0u64); + DoubleMap::insert(&key1, &key2, &4u64); + assert_eq!(DoubleMap::get(&key1, &key2), 4u64); + assert_eq!(DoubleMap::take(&key1, &key2), 4u64); + assert_eq!(DoubleMap::get(&key1, &key2), 0u64); + + // mutate + DoubleMap::mutate(&key1, &key2, |val| *val = 15); + assert_eq!(DoubleMap::get(&key1, &key2), 15u64); + + // remove + DoubleMap::remove(&key1, &key2); + assert_eq!(DoubleMap::get(&key1, &key2), 0u64); + + // remove prefix + DoubleMap::insert(&key1, &key2, &4u64); + DoubleMap::insert(&key1, &(key2 + 1), &4u64); + DoubleMap::insert(&(key1 + 1), &key2, &4u64); + DoubleMap::insert(&(key1 + 1), &(key2 + 1), &4u64); + }); + e.commit_all().unwrap(); + e.execute_with(|| { + assert!(matches!( + DoubleMap::clear_prefix(&key1, u32::max_value(), None), + MultiRemovalResults { maybe_cursor: None, backend: 2, unique: 2, loops: 2 } + )); + assert_eq!(DoubleMap::get(&key1, &key2), 0u64); + assert_eq!(DoubleMap::get(&key1, &(key2 + 1)), 0u64); + assert_eq!(DoubleMap::get(&(key1 + 1), &key2), 4u64); + assert_eq!(DoubleMap::get(&(key1 + 1), &(key2 + 1)), 4u64); + }); +} + +#[test] +fn double_map_basic_insert_remove_remove_prefix_should_work() { + new_test_ext().execute_with(|| { + let key1 = 17u32; + let key2 = 18u32; + type DoubleMap = self::frame_system::DataDM; + + // initialized during genesis + assert_eq!(DoubleMap::get(&15u32, &16u32), 42u64); + + // get / insert / take + assert_eq!(DoubleMap::get(&key1, &key2), 0u64); + DoubleMap::insert(&key1, &key2, &4u64); + assert_eq!(DoubleMap::get(&key1, &key2), 4u64); + assert_eq!(DoubleMap::take(&key1, &key2), 4u64); + assert_eq!(DoubleMap::get(&key1, &key2), 0u64); + + // mutate + DoubleMap::mutate(&key1, &key2, |val| *val = 15); + assert_eq!(DoubleMap::get(&key1, &key2), 15u64); + + // remove 
+ DoubleMap::remove(&key1, &key2); + assert_eq!(DoubleMap::get(&key1, &key2), 0u64); + + // remove prefix + DoubleMap::insert(&key1, &key2, &4u64); + DoubleMap::insert(&key1, &(key2 + 1), &4u64); + DoubleMap::insert(&(key1 + 1), &key2, &4u64); + DoubleMap::insert(&(key1 + 1), &(key2 + 1), &4u64); + // all in overlay + assert!(matches!( + DoubleMap::clear_prefix(&key1, u32::max_value(), None), + MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } + )); + // Note this is the incorrect answer (for now), since we are using v2 of + // `clear_prefix`. + // When we switch to v3, then this will become: + // MultiRemovalResults:: { maybe_cursor: None, backend: 0, unique: 2, loops: 2 }, + assert!(matches!( + DoubleMap::clear_prefix(&key1, u32::max_value(), None), + MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } + )); + assert_eq!(DoubleMap::get(&key1, &key2), 0u64); + assert_eq!(DoubleMap::get(&key1, &(key2 + 1)), 0u64); + assert_eq!(DoubleMap::get(&(key1 + 1), &key2), 4u64); + assert_eq!(DoubleMap::get(&(key1 + 1), &(key2 + 1)), 4u64); + }); +} + +#[test] +fn double_map_append_should_work() { + new_test_ext().execute_with(|| { + type DoubleMap = self::frame_system::AppendableDM; + + let key1 = 17u32; + let key2 = 18u32; + + DoubleMap::insert(&key1, &key2, &vec![1]); + DoubleMap::append(&key1, &key2, 2); + assert_eq!(DoubleMap::get(&key1, &key2), &[1, 2]); + }); +} + +#[test] +fn double_map_mutate_exists_should_work() { + new_test_ext().execute_with(|| { + type DoubleMap = self::frame_system::DataDM; + + let (key1, key2) = (11, 13); + + // mutated + DoubleMap::mutate_exists(key1, key2, |v| *v = Some(1)); + assert_eq!(DoubleMap::get(&key1, key2), 1); + + // removed if mutated to `None` + DoubleMap::mutate_exists(key1, key2, |v| *v = None); + assert!(!DoubleMap::contains_key(&key1, key2)); + }); +} + +#[test] +fn double_map_try_mutate_exists_should_work() { + new_test_ext().execute_with(|| { + type DoubleMap = 
self::frame_system::DataDM; + type TestResult = Result<(), &'static str>; + + let (key1, key2) = (11, 13); + + // mutated if `Ok` + assert_ok!(DoubleMap::try_mutate_exists(key1, key2, |v| -> TestResult { + *v = Some(1); + Ok(()) + })); + assert_eq!(DoubleMap::get(&key1, key2), 1); + + // no-op if `Err` + assert_noop!( + DoubleMap::try_mutate_exists(key1, key2, |v| -> TestResult { + *v = Some(2); + Err("nah") + }), + "nah" + ); + + // removed if mutated to`None` + assert_ok!(DoubleMap::try_mutate_exists(key1, key2, |v| -> TestResult { + *v = None; + Ok(()) + })); + assert!(!DoubleMap::contains_key(&key1, key2)); + }); +} + +fn expected_metadata() -> PalletStorageMetadataIR { + PalletStorageMetadataIR { + prefix: "System", + entries: vec![ + StorageEntryMetadataIR { + name: "Data", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Twox64Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: vec![0, 0, 0, 0, 0, 0, 0, 0], + docs: vec![], + }, + StorageEntryMetadataIR { + name: "OptionLinkedMap", + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadataIR { + name: "GenericData", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Identity], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: vec![0, 0, 0, 0], + docs: vec![], + }, + StorageEntryMetadataIR { + name: "GenericData2", + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadataIR { + name: "DataDM", + 
modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Twox64Concat, StorageHasherIR::Blake2_128Concat], + key: scale_info::meta_type::<(u32, u32)>(), + value: scale_info::meta_type::(), + }, + default: vec![0, 0, 0, 0, 0, 0, 0, 0], + docs: vec![], + }, + StorageEntryMetadataIR { + name: "GenericDataDM", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat, StorageHasherIR::Identity], + key: scale_info::meta_type::<(u32, u32)>(), + value: scale_info::meta_type::(), + }, + default: vec![0, 0, 0, 0], + docs: vec![], + }, + StorageEntryMetadataIR { + name: "GenericData2DM", + modifier: StorageEntryModifierIR::Optional, + ty: StorageEntryTypeIR::Map { + hashers: vec![StorageHasherIR::Blake2_128Concat, StorageHasherIR::Twox64Concat], + key: scale_info::meta_type::<(u32, u32)>(), + value: scale_info::meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadataIR { + name: "AppendableDM", + modifier: StorageEntryModifierIR::Default, + ty: StorageEntryTypeIR::Map { + hashers: vec![ + StorageHasherIR::Blake2_128Concat, + StorageHasherIR::Blake2_128Concat, + ], + key: scale_info::meta_type::<(u32, u32)>(), + value: scale_info::meta_type::>(), + }, + default: vec![0], + docs: vec![], + }, + ], + } +} + +#[test] +fn store_metadata() { + let metadata = Pallet::::storage_metadata(); + pretty_assertions::assert_eq!(expected_metadata(), metadata); +} + +parameter_types! 
{ + storage StorageParameter: u64 = 10; +} + +#[test] +fn check_storage_parameter_type_works() { + TestExternalities::default().execute_with(|| { + assert_eq!(sp_io::hashing::twox_128(b":StorageParameter:"), StorageParameter::key()); + + assert_eq!(10, StorageParameter::get()); + + StorageParameter::set(&300); + assert_eq!(300, StorageParameter::get()); + }) +} diff --git a/frame/support/src/tests/storage_alias.rs b/frame/support/src/tests/storage_alias.rs new file mode 100644 index 0000000000000..05ea1b5f712c6 --- /dev/null +++ b/frame/support/src/tests/storage_alias.rs @@ -0,0 +1,192 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use sp_core::Get; + +use super::{new_test_ext, BlockNumberFor, Config, Pallet, Runtime}; +use crate::{ + assert_noop, assert_ok, parameter_types, storage::generator::StorageValue, Blake2_128Concat, +}; + +#[test] +fn storage_alias_works() { + new_test_ext().execute_with(|| { + #[crate::storage_alias] + type GenericData2 = + StorageMap, BlockNumberFor>; + + assert_eq!(Pallet::::generic_data2(5), None); + GenericData2::::insert(5, 5); + assert_eq!(Pallet::::generic_data2(5), Some(5)); + + /// Some random docs that ensure that docs are accepted + #[crate::storage_alias] + pub type GenericData = + StorageMap, BlockNumberFor>; + + #[crate::storage_alias] + pub type GenericDataPallet = + StorageMap, Blake2_128Concat, BlockNumberFor, BlockNumberFor>; + }); +} + +#[test] +fn storage_value_mutate_exists_should_work() { + new_test_ext().execute_with(|| { + #[crate::storage_alias] + pub type Value = StorageValue; + + assert!(!Value::exists()); + + Value::mutate_exists(|v| *v = Some(1)); + assert!(Value::exists()); + assert_eq!(Value::get(), Some(1)); + + // removed if mutated to `None` + Value::mutate_exists(|v| *v = None); + assert!(!Value::exists()); + }); +} + +#[test] +fn storage_value_try_mutate_exists_should_work() { + new_test_ext().execute_with(|| { + #[crate::storage_alias] + pub type Value = StorageValue; + + type TestResult = std::result::Result<(), &'static str>; + + assert!(!Value::exists()); + + // mutated if `Ok` + assert_ok!(Value::try_mutate_exists(|v| -> TestResult { + *v = Some(1); + Ok(()) + })); + assert!(Value::exists()); + assert_eq!(Value::get(), Some(1)); + + // no-op if `Err` + assert_noop!( + Value::try_mutate_exists(|v| -> TestResult { + *v = Some(2); + Err("nah") + }), + "nah" + ); + assert_eq!(Value::get(), Some(1)); + + // removed if mutated to`None` + assert_ok!(Value::try_mutate_exists(|v| -> TestResult { + *v = None; + Ok(()) + })); + assert!(!Value::exists()); + }); +} + +#[docify::export] +#[test] +fn verbatim_attribute() { + 
new_test_ext().execute_with(|| { + // Declare the alias that will use the verbatim identifier as prefix. + #[crate::storage_alias(verbatim)] + pub type Value = StorageValue; + + // Check that it works as expected. + Value::put(1); + assert_eq!(1, Value::get().unwrap()); + + // The prefix is the one we declared above. + assert_eq!(&b"Test"[..], Value::module_prefix()); + }); +} + +#[docify::export] +#[test] +fn pallet_name_attribute() { + new_test_ext().execute_with(|| { + // Declare the alias that will use the pallet name as prefix. + #[crate::storage_alias(pallet_name)] + pub type Value = StorageValue, u32>; + + // Check that it works as expected. + Value::::put(1); + assert_eq!(1, Value::::get().unwrap()); + + // The prefix is the pallet name. In this case the pallet name is `System` as declared in + // `construct_runtime!`. + assert_eq!(&b"System"[..], Value::::module_prefix()); + }); +} + +#[docify::export] +#[test] +fn dynamic_attribute() { + new_test_ext().execute_with(|| { + // First let's declare our prefix. + // + // It could be any type that, as long as it implements `Get<&'static str>`. + parameter_types! { + pub Prefix: &'static str = "Hello"; + } + + // Declare the alias that will use the dynamic `Get` as prefix. + #[crate::storage_alias(dynamic)] + pub type Value> = StorageValue; + + // Check that it works as expected. + Value::::put(1); + assert_eq!(1, Value::::get().unwrap()); + + // The prefix is the one we declared above. + assert_eq!(&b"Hello"[..], Value::::module_prefix()); + }); +} + +#[docify::export] +#[test] +fn storage_alias_guess() { + new_test_ext().execute_with(|| { + // The macro will use `Test` as prefix. + #[crate::storage_alias] + pub type Value = StorageValue; + + assert_eq!(&b"Test"[..], Value::module_prefix()); + + // The macro will use the pallet name as prefix. 
+ #[crate::storage_alias] + pub type PalletValue = StorageValue, u32>; + + assert_eq!(&b"System"[..], PalletValue::::module_prefix()); + }); +} + +#[test] +fn dynamic_attribute_without_generics_works() { + new_test_ext().execute_with(|| { + parameter_types! { + pub Prefix: &'static str = "Hello"; + } + + #[crate::storage_alias(dynamic)] + pub type Value = StorageValue; + + Value::put(1); + assert_eq!(1, Value::get().unwrap()) + }); +} diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 8d0f8aa88dc5e..27d16e133aa02 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -80,18 +80,18 @@ pub use metadata::{ }; mod hooks; -#[cfg(feature = "std")] +#[allow(deprecated)] pub use hooks::GenesisBuild; pub use hooks::{ - Hooks, IntegrityTest, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnRuntimeUpgrade, - OnTimestampSet, + BuildGenesisConfig, Hooks, IntegrityTest, OnFinalize, OnGenesis, OnIdle, OnInitialize, + OnRuntimeUpgrade, OnTimestampSet, }; pub mod schedule; mod storage; pub use storage::{ - Instance, PartialStorageInfoTrait, StorageInfo, StorageInfoTrait, StorageInstance, - TrackedStorageKey, WhitelistedStorageKeys, + Incrementable, Instance, PartialStorageInfoTrait, StorageInfo, StorageInfoTrait, + StorageInstance, TrackedStorageKey, WhitelistedStorageKeys, }; mod dispatch; @@ -100,22 +100,20 @@ pub use dispatch::EnsureOneOf; pub use dispatch::{ AsEnsureOriginWithArg, CallerTrait, EitherOf, EitherOfDiverse, EnsureOrigin, EnsureOriginEqualOrHigherPrivilege, EnsureOriginWithArg, MapSuccess, NeverEnsureOrigin, - OriginTrait, TryMapSuccess, UnfilteredDispatchable, + OriginTrait, TryMapSuccess, TryWithMorphedArg, UnfilteredDispatchable, }; mod voting; -pub use voting::{ - ClassCountOf, CurrencyToVote, PollStatus, Polling, SaturatingCurrencyToVote, - U128CurrencyToVote, VoteTally, -}; +pub use voting::{ClassCountOf, PollStatus, Polling, VoteTally}; mod preimages; pub use preimages::{Bounded, BoundedInline, FetchResult, Hash, 
QueryPreimage, StorePreimage}; mod messages; pub use messages::{ - EnqueueMessage, ExecuteOverweightError, Footprint, NoopServiceQueues, ProcessMessage, - ProcessMessageError, ServiceQueues, TransformOrigin, + EnqueueMessage, EnqueueWithOrigin, ExecuteOverweightError, Footprint, HandleMessage, + NoopServiceQueues, ProcessMessage, ProcessMessageError, QueuePausedQuery, ServiceQueues, + TransformOrigin, }; #[cfg(feature = "try-runtime")] diff --git a/frame/support/src/traits/dispatch.rs b/frame/support/src/traits/dispatch.rs index 6961e69ba5750..9ea58479a0dfe 100644 --- a/frame/support/src/traits/dispatch.rs +++ b/frame/support/src/traits/dispatch.rs @@ -48,19 +48,6 @@ pub trait EnsureOrigin { fn try_successful_origin() -> Result; } -/// [`EnsureOrigin`] implementation that always fails. -pub struct NeverEnsureOrigin(sp_std::marker::PhantomData); -impl EnsureOrigin for NeverEnsureOrigin { - type Success = Success; - fn try_origin(o: OO) -> Result { - Err(o) - } - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - Err(()) - } -} - /// [`EnsureOrigin`] implementation that checks that an origin has equal or higher privilege /// compared to the expected `Origin`. /// @@ -166,6 +153,62 @@ pub trait EnsureOriginWithArg { fn try_successful_origin(a: &Argument) -> Result; } +/// Simple macro to explicitly implement [EnsureOriginWithArg] to be used on any type which +/// implements [EnsureOrigin]. This is quick and dirty, so you must use the type parameters `O` +/// (the origin type), `T` (the argument type) and `AccountId` (if you are using the `O: ..` form). +/// +/// The argument is ignored, much like in [AsEnsureOriginWithArg]. +#[macro_export] +macro_rules! impl_ensure_origin_with_arg_ignoring_arg { + ( impl < { O: .., I: 'static, $( $bound:tt )* }> EnsureOriginWithArg for $name:ty {} ) => { + impl_ensure_origin_with_arg_ignoring_arg! 
{ + impl <{ + O: Into, O>> + From>, + I: 'static, + $( $bound )* + }> EnsureOriginWithArg for $name {} + } + }; + ( impl < { O: .. , $( $bound:tt )* }> EnsureOriginWithArg for $name:ty {} ) => { + impl_ensure_origin_with_arg_ignoring_arg! { + impl <{ + O: Into, O>> + From>, + $( $bound )* + }> EnsureOriginWithArg for $name {} + } + }; + ( impl < { $( $bound:tt )* } > EnsureOriginWithArg<$o_param:ty, $t_param:ty> for $name:ty {} ) => { + impl < $( $bound )* > EnsureOriginWithArg<$o_param, $t_param> for $name { + type Success = >::Success; + fn try_origin(o: $o_param, _: &$t_param) -> Result { + >::try_origin(o) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin(_: &$t_param) -> Result<$o_param, ()> { + >::try_successful_origin() + } + } + } +} + +/// [`EnsureOrigin`] implementation that always fails. +pub struct NeverEnsureOrigin(sp_std::marker::PhantomData); +impl EnsureOrigin for NeverEnsureOrigin { + type Success = Success; + fn try_origin(o: OO) -> Result { + Err(o) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Err(()) + } +} +impl_ensure_origin_with_arg_ignoring_arg! { + impl<{ OO, Success, A }> + EnsureOriginWithArg for NeverEnsureOrigin + {} +} + pub struct AsEnsureOriginWithArg(sp_std::marker::PhantomData); impl> EnsureOriginWithArg for AsEnsureOriginWithArg @@ -207,6 +250,18 @@ impl, Mutator: Morph> EnsureOrig Original::try_successful_origin() } } +impl, Mutator: Morph, A> + EnsureOriginWithArg for MapSuccess +{ + type Success = Mutator::Outcome; + fn try_origin(o: O, a: &A) -> Result { + Ok(Mutator::morph(Original::try_origin(o, a)?)) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin(a: &A) -> Result { + Original::try_successful_origin(a) + } +} /// A derivative `EnsureOrigin` implementation. 
It mutates the `Success` result of an `Original` /// implementation with a given `Mutator`, allowing the possibility of an error to be returned @@ -228,6 +283,43 @@ impl, Mutator: TryMorph> Original::try_successful_origin() } } +impl, Mutator: TryMorph, A> + EnsureOriginWithArg for TryMapSuccess +{ + type Success = Mutator::Outcome; + fn try_origin(o: O, a: &A) -> Result { + let orig = o.clone(); + Mutator::try_morph(Original::try_origin(o, a)?).map_err(|()| orig) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin(a: &A) -> Result { + Original::try_successful_origin(a) + } +} + +pub struct TryWithMorphedArg( + PhantomData<(O, A, Morph, Inner, Success)>, +); +impl< + O, + A, + Morph: for<'a> TryMorph<&'a A>, + Inner: for<'a> EnsureOriginWithArg>::Outcome, Success = Success>, + Success, + > EnsureOriginWithArg for TryWithMorphedArg +{ + type Success = Success; + fn try_origin(o: O, a: &A) -> Result { + match Morph::try_morph(a) { + Ok(x) => Inner::try_origin(o, &x), + _ => return Err(o), + } + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin(a: &A) -> Result { + Inner::try_successful_origin(&Morph::try_morph(a).map_err(|_| ())?) + } +} /// "OR gate" implementation of `EnsureOrigin` allowing for different `Success` types for `L` /// and `R`, with them combined using an `Either` type. 
@@ -250,6 +342,24 @@ impl, R: EnsureOrigin> L::try_successful_origin().or_else(|()| R::try_successful_origin()) } } +impl< + OuterOrigin, + L: EnsureOriginWithArg, + R: EnsureOriginWithArg, + Argument, + > EnsureOriginWithArg for EitherOfDiverse +{ + type Success = Either; + fn try_origin(o: OuterOrigin, a: &Argument) -> Result { + L::try_origin(o, a) + .map_or_else(|o| R::try_origin(o, a).map(Either::Right), |o| Ok(Either::Left(o))) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin(a: &Argument) -> Result { + L::try_successful_origin(a).or_else(|()| R::try_successful_origin(a)) + } +} /// "OR gate" implementation of `EnsureOrigin` allowing for different `Success` types for `L` /// and `R`, with them combined using an `Either` type. @@ -283,6 +393,23 @@ impl< L::try_successful_origin().or_else(|()| R::try_successful_origin()) } } +impl< + OuterOrigin, + L: EnsureOriginWithArg, + R: EnsureOriginWithArg, + Argument, + > EnsureOriginWithArg for EitherOf +{ + type Success = L::Success; + fn try_origin(o: OuterOrigin, a: &Argument) -> Result { + L::try_origin(o, a).or_else(|o| R::try_origin(o, a)) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin(a: &Argument) -> Result { + L::try_successful_origin(a).or_else(|()| R::try_successful_origin(a)) + } +} /// Type that can be dispatched with an origin but without checking the origin filter. /// @@ -306,6 +433,21 @@ pub trait CallerTrait: Parameter + Member + From /// Extract a reference to the system-level `RawOrigin` if it is that. fn as_system_ref(&self) -> Option<&RawOrigin>; + + /// Extract the signer from it if a system `Signed` origin, `None` otherwise. + fn as_signed(&self) -> Option<&AccountId> { + self.as_system_ref().and_then(RawOrigin::as_signed) + } + + /// Returns `true` if `self` is a system `Root` origin, `None` otherwise. 
+ fn is_root(&self) -> bool { + self.as_system_ref().map_or(false, RawOrigin::is_root) + } + + /// Returns `true` if `self` is a system `None` origin, `None` otherwise. + fn is_none(&self) -> bool { + self.as_system_ref().map_or(false, RawOrigin::is_none) + } } /// Methods available on `frame_system::Config::RuntimeOrigin`. @@ -356,7 +498,13 @@ pub trait OriginTrait: Sized { fn signed(by: Self::AccountId) -> Self; /// Extract the signer from the message if it is a `Signed` origin. + #[deprecated = "Use `into_signer` instead"] fn as_signed(self) -> Option { + self.into_signer() + } + + /// Extract the signer from the message if it is a `Signed` origin. + fn into_signer(self) -> Option { self.into_caller().into_system().and_then(|s| { if let RawOrigin::Signed(who) = s { Some(who) diff --git a/frame/support/src/traits/filter.rs b/frame/support/src/traits/filter.rs index 36420b46f0315..91efafef2f971 100644 --- a/frame/support/src/traits/filter.rs +++ b/frame/support/src/traits/filter.rs @@ -96,15 +96,6 @@ impl InstanceFilter for () { } } -/// Re-expected for the macro. -#[doc(hidden)] -pub use sp_std::{ - boxed::Box, - cell::RefCell, - mem::{swap, take}, - vec::Vec, -}; - #[macro_export] macro_rules! impl_filter_stack { ($target:ty, $base:ty, $call:ty, $module:ident) => { @@ -112,7 +103,8 @@ macro_rules! impl_filter_stack { mod $module { #[allow(unused_imports)] use super::*; - use $crate::traits::filter::{swap, take, RefCell, Vec, Box, Contains, FilterStack}; + use $crate::sp_std::{boxed::Box, cell::RefCell, mem::{swap, take}, vec::Vec}; + use $crate::traits::filter::{Contains, FilterStack}; thread_local! 
{ static FILTER: RefCell bool + 'static>>> = RefCell::new(Vec::new()); diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index a4c6776572fac..e58f836070b75 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -15,25 +15,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Traits for hooking tasks to events in a blockchain's lifecycle. +//! Traits relating to pallet hooks. +//! +//! See [`Hooks`] as the main entry-point. + +#![deny(missing_docs)] use crate::weights::Weight; use impl_trait_for_tuples::impl_for_tuples; use sp_runtime::traits::AtLeast32BitUnsigned; use sp_std::prelude::*; -/// The block initialization trait. -/// -/// Implementing this lets you express what should happen for your pallet when the block is -/// beginning (right before the first extrinsic is executed). +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + +/// See [`Hooks::on_initialize`]. pub trait OnInitialize { - /// The block is being initialized. Implement to have something happen. - /// - /// Return the non-negotiable weight consumed in the block. - /// - /// NOTE: This function is called BEFORE ANY extrinsic in a block is applied, - /// including inherent extrinsics. Hence for instance, if you runtime includes - /// `pallet_timestamp`, the `timestamp` is not yet up to date at this point. + /// See [`Hooks::on_initialize`]. fn on_initialize(_n: BlockNumber) -> Weight { Weight::zero() } @@ -50,32 +48,18 @@ impl OnInitialize for Tuple { } } -/// The block finalization trait. -/// -/// Implementing this lets you express what should happen for your pallet when the block is ending. +/// See [`Hooks::on_finalize`]. 
#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnFinalize { - /// The block is being finalized. Implement to have something happen. - /// - /// NOTE: This function is called AFTER ALL extrinsics in a block are applied, - /// including inherent extrinsics. + /// See [`Hooks::on_finalize`]. fn on_finalize(_n: BlockNumber) {} } -/// The block's on idle trait. -/// -/// Implementing this lets you express what should happen for your pallet before -/// block finalization (see `on_finalize` hook) in case any remaining weight is left. +/// See [`Hooks::on_idle`]. pub trait OnIdle { - /// The block is being finalized. - /// Implement to have something happen in case there is leftover weight. - /// Check the passed `remaining_weight` to make sure it is high enough to allow for - /// your pallet's extra computation. - /// - /// NOTE: This function is called AFTER ALL extrinsics - including inherent extrinsics - - /// in a block are applied but before `on_finalize` is executed. + /// See [`Hooks::on_idle`]. fn on_idle(_n: BlockNumber, _remaining_weight: Weight) -> Weight { Weight::zero() } @@ -115,28 +99,25 @@ pub trait OnGenesis { fn on_genesis() {} } -/// The runtime upgrade trait. -/// -/// Implementing this lets you express what should happen when the runtime upgrades, -/// and changes may need to occur to your module. +/// See [`Hooks::on_runtime_upgrade`]. pub trait OnRuntimeUpgrade { - /// Perform a module upgrade. - /// - /// # Warning - /// - /// This function will be called before we initialized any runtime state, aka `on_initialize` - /// wasn't called yet. So, information like the block number and any other - /// block local data are not accessible. - /// - /// Return the non-negotiable weight consumed for runtime upgrade. + /// See [`Hooks::on_runtime_upgrade`]. 
fn on_runtime_upgrade() -> Weight { Weight::zero() } - /// Same as `on_runtime_upgrade`, but perform the optional `pre_upgrade` and `post_upgrade` as - /// well. + /// The expected and default behavior of this method is to handle executing `pre_upgrade` -> + /// `on_runtime_upgrade` -> `post_upgrade` hooks for a migration. + /// + /// Internally, the default implementation + /// - Handles passing data from `pre_upgrade` to `post_upgrade` + /// - Ensure storage is not modified in `pre_upgrade` and `post_upgrade` hooks. + /// + /// Combining the `pre_upgrade` -> `on_runtime_upgrade` -> `post_upgrade` logic flow into a + /// single method call is helpful for scenarios like testing a tuple of migrations, where the + /// tuple contains order-dependent migrations. #[cfg(feature = "try-runtime")] - fn try_on_runtime_upgrade(checks: bool) -> Result { + fn try_on_runtime_upgrade(checks: bool) -> Result { let maybe_state = if checks { let _guard = frame_support::StorageNoopGuard::default(); let state = Self::pre_upgrade()?; @@ -156,33 +137,15 @@ pub trait OnRuntimeUpgrade { Ok(weight) } - /// Execute some pre-checks prior to a runtime upgrade. - /// - /// Return a `Vec` that can contain arbitrary encoded data (usually some pre-upgrade state), - /// which will be passed to `post_upgrade` after upgrading for post-check. An empty vector - /// should be returned if there is no such need. - /// - /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. - /// - /// This hook must not write to any state, as it would make the main `on_runtime_upgrade` path - /// inaccurate. + /// See [`Hooks::pre_upgrade`]. #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { Ok(Vec::new()) } - /// Execute some post-checks after a runtime upgrade. - /// - /// The `state` parameter is the `Vec` returned by `pre_upgrade` before upgrading, which - /// can be used for post-check. 
NOTE: if `pre_upgrade` is not implemented an empty vector will - /// be passed in, in such case `post_upgrade` should ignore it. - /// - /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. - /// - /// This hook must not write to any state, as it would make the main `on_runtime_upgrade` path - /// inaccurate. + /// See [`Hooks::post_upgrade`]. #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { Ok(()) } } @@ -197,11 +160,10 @@ impl OnRuntimeUpgrade for Tuple { weight } - /// We are executing pre- and post-checks sequentially in order to be able to test several - /// consecutive migrations for the same pallet without errors. Therefore pre and post upgrade - /// hooks for tuples are a noop. + /// Implements the default behavior of `try_on_runtime_upgrade` for tuples, logging any errors + /// that occur. #[cfg(feature = "try-runtime")] - fn try_on_runtime_upgrade(checks: bool) -> Result { + fn try_on_runtime_upgrade(checks: bool) -> Result { let mut weight = Weight::zero(); let mut errors = Vec::new(); @@ -224,76 +186,187 @@ impl OnRuntimeUpgrade for Tuple { errors.iter().for_each(|err| { log::error!( target: "try-runtime", - "{}", + "{:?}", err ); }); - return Err("Detected multiple errors while executing `try_on_runtime_upgrade`, check the logs!") + return Err("Detected multiple errors while executing `try_on_runtime_upgrade`, check the logs!".into()) } Ok(weight) } + + /// [`OnRuntimeUpgrade::pre_upgrade`] should not be used on a tuple. + /// + /// Instead, implementors should use [`OnRuntimeUpgrade::try_on_runtime_upgrade`] which + /// internally calls `pre_upgrade` -> `on_runtime_upgrade` -> `post_upgrade` for each tuple + /// member in sequence, enabling testing of order-dependent migrations. 
+ #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + Err("Usage of `pre_upgrade` with Tuples is not expected. Please use `try_on_runtime_upgrade` instead, which internally calls `pre_upgrade` -> `on_runtime_upgrade` -> `post_upgrade` for each tuple member.".into()) + } + + /// [`OnRuntimeUpgrade::post_upgrade`] should not be used on a tuple. + /// + /// Instead, implementors should use [`OnRuntimeUpgrade::try_on_runtime_upgrade`] which + /// internally calls `pre_upgrade` -> `on_runtime_upgrade` -> `post_upgrade` for each tuple + /// member in sequence, enabling testing of order-dependent migrations. + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { + Err("Usage of `post_upgrade` with Tuples is not expected. Please use `try_on_runtime_upgrade` instead, which internally calls `pre_upgrade` -> `on_runtime_upgrade` -> `post_upgrade` for each tuple member.".into()) + } } -/// Type that provide some integrity tests. -/// -/// This implemented for modules by `decl_module`. +/// See [`Hooks::integrity_test`]. #[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait IntegrityTest { - /// Run integrity test. - /// - /// The test is not executed in a externalities provided environment. + /// See [`Hooks::integrity_test`]. fn integrity_test() {} } -/// The pallet hooks trait. Implementing this lets you express some logic to execute. +#[cfg_attr(doc, aquamarine::aquamarine)] +/// The pallet hooks trait. 
This is merely an umbrella trait for: +/// +/// - [`OnInitialize`] +/// - [`OnFinalize`] +/// - [`OnRuntimeUpgrade`] +/// - [`crate::traits::misc::OffchainWorker`] +/// - [`OnIdle`] +/// - [`IntegrityTest`] +/// +/// ## Ordering +/// +/// For all hooks, except [`OnIdle`] the order of execution is derived from how the pallets are +/// ordered in [`crate::construct_runtime`]. +/// +/// ## Summary +/// +/// In short, the following diagram shows the flow of hooks in a pallet +/// +/// ```mermaid +/// graph LR +/// Optional --> BeforeExtrinsics +/// BeforeExtrinsics --> Extrinsics +/// Extrinsics --> AfterExtrinsics +/// subgraph Optional +/// OnRuntimeUpgrade +/// end +/// +/// subgraph BeforeExtrinsics +/// OnInitialize +/// end +/// +/// subgraph Extrinsics +/// direction TB +/// Inherent1 +/// Inherent2 +/// Extrinsic1 +/// Extrinsic2 +/// +/// Inherent1 --> Inherent2 +/// Inherent2 --> Extrinsic1 +/// Extrinsic1 --> Extrinsic2 +/// end +/// +/// subgraph AfterExtrinsics +/// OnIdle +/// OnFinalize +/// +/// OnIdle --> OnFinalize +/// end +/// ``` +/// +/// * `OnRuntimeUpgrade` is only executed before everything else if a code +/// * `OnRuntimeUpgrade` is mandatorily at the beginning of the block body (extrinsics) being +/// processed. change is detected. +/// * Extrinsics start with inherents, and continue with other signed or unsigned extrinsics. +/// * `OnIdle` optionally comes after extrinsics. +/// `OnFinalize` mandatorily comes after `OnIdle`. +/// +/// > `OffchainWorker` is not part of this flow, as it is not really part of the consensus/main +/// > block import path, and is called optionally, and in other circumstances. See +/// > [`crate::traits::misc::OffchainWorker`] for more information. +/// +/// To learn more about the execution of hooks see `frame-executive` as this component is is charge +/// of dispatching extrinsics and placing the hooks in the correct order. pub trait Hooks { - /// The block is being finalized. Implement to have something happen. 
+ /// Block initialization hook. This is called at the very beginning of block execution. + /// + /// Must return the non-negotiable weight of both itself and whatever [`Hooks::on_finalize`] + /// wishes to consume. + /// + /// ## Warning + /// + /// The weight returned by this is treated as `DispatchClass::Mandatory`, meaning that + /// it MUST BE EXECUTED. If this is not the case, consider using [`Hooks::on_idle`] instead. + /// + /// Try to keep any arbitrary execution __deterministic__ and within __minimal__ time + /// complexity. For example, do not execute any unbounded iterations. + /// + /// NOTE: This function is called BEFORE ANY extrinsic in a block is applied, including inherent + /// extrinsics. Hence for instance, if you runtime includes `pallet-timestamp`, the `timestamp` + /// is not yet up to date at this point. + fn on_initialize(_n: BlockNumber) -> Weight { + Weight::zero() + } + + /// Block finalization hook. This is called at the very end of block execution. + /// + /// Note that this has nothing to do with finality in the "consensus" sense. + /// + /// Note that the non-negotiable weight for this has must have already been returned by + /// [`Hooks::on_initialize`]. It usage alone is not permitted. + /// + /// Similar to [`Hooks::on_initialize`] it should only be used when execution is absolutely + /// necessary. In other cases, consider using [`Hooks::on_idle`] instead. fn on_finalize(_n: BlockNumber) {} - /// This will be run when the block is being finalized (before `on_finalize`). + /// Hook to consume a block's idle time. This will run when the block is being finalized (before + /// [`Hooks::on_finalize`]). /// - /// Implement to have something happen using the remaining weight. Will not fire if the - /// remaining weight is 0. 
+ /// Given that all dispatchables are already executed and noted (and the weight for + /// [`Hooks::on_finalize`], which comes next, is also already accounted for via + /// `on_initialize`), this hook consumes anything that is leftover. /// /// Each pallet's `on_idle` is chosen to be the first to execute in a round-robin fashion /// indexed by the block number. /// /// Return the weight used, the caller will use this to calculate the remaining weight and then /// call the next pallet `on_idle` hook if there is still weight left. + /// + /// Any implementation should always respect `_remaining_weight` and never consume (and + /// therefore return) more than this amount. fn on_idle(_n: BlockNumber, _remaining_weight: Weight) -> Weight { Weight::zero() } - /// The block is being initialized. Implement to have something happen. + /// Hook executed when a code change (aka. a "runtime upgrade") is detected by FRAME. /// - /// Return the non-negotiable weight consumed in the block. - fn on_initialize(_n: BlockNumber) -> Weight { - Weight::zero() - } - - /// Perform a module upgrade. + /// Be aware that this is called before [`Hooks::on_initialize`] of any pallet; therefore, a lot + /// of the critical storage items such as `block_number` in system pallet might have not been + /// set. + /// + /// Vert similar to [`Hooks::on_initialize`], any code in this block is mandatory and MUST + /// execute. Use with care. /// - /// NOTE: this doesn't include all pallet logic triggered on runtime upgrade. For instance it - /// doesn't include the write of the pallet version in storage. The final complete logic - /// triggered on runtime upgrade is given by implementation of `OnRuntimeUpgrade` trait by - /// `Pallet`. + /// ## Implementation Note: Versioning /// - /// # Warning + /// 1. An implementation of this should typically follow a pattern where the version of the + /// pallet is checked against the onchain version, and a decision is made about what needs to be + /// done. 
This is helpful to prevent accidental repetitive execution of this hook, which can be + /// catastrophic. /// - /// This function will be called before we initialized any runtime state, aka `on_initialize` - /// wasn't called yet. So, information like the block number and any other block local data are - /// not accessible. + /// Alternatively, `migrations::VersionedRuntimeUpgrade` can be used to assist with + /// this. /// - /// Return the non-negotiable weight consumed for runtime upgrade. + /// ## Implementation Note: Runtime Level Migration /// - /// While this function can be freely implemented, using `on_runtime_upgrade` from inside the - /// pallet is discouraged and might get deprecated in the future. Alternatively, export the same - /// logic as a free-function from your pallet, and pass it to `type Executive` from the - /// top-level runtime. + /// Additional "upgrade hooks" can be created by pallets by a manual implementation of + /// [`Hooks::on_runtime_upgrade`] which can be passed on to `Executive` at the top level + /// runtime. fn on_runtime_upgrade() -> Weight { Weight::zero() } @@ -305,7 +378,7 @@ pub trait Hooks { /// /// This hook should not alter any storage. #[cfg(feature = "try-runtime")] - fn try_state(_n: BlockNumber) -> Result<(), &'static str> { + fn try_state(_n: BlockNumber) -> Result<(), TryRuntimeError> { Ok(()) } @@ -317,7 +390,7 @@ pub trait Hooks { /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { Ok(Vec::new()) } @@ -329,42 +402,63 @@ pub trait Hooks { /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. 
#[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { Ok(()) } - /// Implementing this function on a module allows you to perform long-running tasks - /// that make (by default) validators generate transactions that feed results - /// of those long-running computations back on chain. + /// Implementing this function on a pallet allows you to perform long-running tasks that are + /// dispatched as separate threads, and entirely independent of the main wasm runtime. /// - /// NOTE: This function runs off-chain, so it can access the block state, - /// but cannot preform any alterations. More specifically alterations are - /// not forbidden, but they are not persisted in any way after the worker - /// has finished. + /// This function can freely read from the state, but any change it makes to the state is + /// meaningless. Writes can be pushed back to the chain by submitting extrinsics from the + /// offchain worker to the transaction pool. See `pallet-example-offchain-worker` for more + /// details on this. /// - /// This function is being called after every block import (when fully synced). + /// Moreover, the code in this function has access to a wider range of host functions in + /// [`sp-io`], namely [`sp_io::offchain`]. This includes exotic operations such as HTTP calls + /// that are not really possible in the rest of the runtime code. /// - /// Implement this and use any of the `Offchain` `sp_io` set of APIs - /// to perform off-chain computations, calls and submit transactions - /// with results to trigger any on-chain changes. - /// Any state alterations are lost and are not persisted. + /// The execution of this hook is entirely optional and is left at the discretion of the + /// node-side software and its configuration. In a normal substrate-cli, look for the CLI + /// flags related to offchain-workers to learn more. 
fn offchain_worker(_n: BlockNumber) {} - /// Run integrity test. + /// Check the integrity of this pallet's configuration. + /// + /// Any code located in this hook is placed in an auto-generated test, and generated as a part + /// of [`crate::construct_runtime`]'s expansion. Look for a test case with a name along the + /// lines of: `__construct_runtime_integrity_test`. + /// + /// This hook is the location where the values/types provided to the `Config` trait + /// of the pallet can be tested for correctness. For example, if two `type Foo: Get` and + /// `type Bar: Get` where `Foo::get()` must always be greater than `Bar::get()`, such + /// checks can be asserted upon here. /// - /// The test is not executed in a externalities provided environment. + /// Note that this hook is executed in an externality environment, provided by + /// `sp_io::TestExternalities`. This makes it possible to access the storage. fn integrity_test() {} } +/// A trait to define the build function of a genesis config for both runtime and pallets. +/// +/// Replaces deprecated [`GenesisBuild`]. +pub trait BuildGenesisConfig: Default + sp_runtime::traits::MaybeSerializeDeserialize { + /// The build function puts initial `GenesisConfig` keys/values pairs into the storage. + fn build(&self); +} + /// A trait to define the build function of a genesis config, T and I are placeholder for pallet /// trait and pallet instance. -#[cfg(feature = "std")] +#[deprecated( + note = "GenesisBuild is planned to be removed in December 2023. Use BuildGenesisConfig instead of it." +)] pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeDeserialize { /// The build function is called within an externalities allowing storage APIs. /// Thus one can write to storage using regular pallet storages. fn build(&self); /// Build the storage using `build` inside default storage. 
+ #[cfg(feature = "std")] fn build_storage(&self) -> Result { let mut storage = Default::default(); self.assimilate_storage(&mut storage)?; @@ -372,6 +466,7 @@ pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeD } /// Assimilate the storage for this module into pre-existing overlays. + #[cfg(feature = "std")] fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> { sp_state_machine::BasicExternalities::execute_with_storage(storage, || { self.build(); @@ -411,13 +506,13 @@ mod tests { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result, TryRuntimeError> { Pre::mutate(|s| s.push(stringify!($name))); Ok(Vec::new()) } #[cfg(feature = "try-runtime")] - fn post_upgrade(_: Vec) -> Result<(), &'static str> { + fn post_upgrade(_: Vec) -> Result<(), TryRuntimeError> { Post::mutate(|s| s.push(stringify!($name))); Ok(()) } @@ -430,6 +525,7 @@ mod tests { impl_test_type!(Baz); TestExternalities::default().execute_with(|| { + // try_on_runtime_upgrade works Foo::try_on_runtime_upgrade(true).unwrap(); assert_eq!(Pre::take(), vec!["Foo"]); assert_eq!(Post::take(), vec!["Foo"]); @@ -445,6 +541,10 @@ mod tests { <(Foo, (Bar, Baz))>::try_on_runtime_upgrade(true).unwrap(); assert_eq!(Pre::take(), vec!["Foo", "Bar", "Baz"]); assert_eq!(Post::take(), vec!["Foo", "Bar", "Baz"]); + + // calling pre_upgrade and post_upgrade directly on tuple of pallets fails + assert!(<(Foo, (Bar, Baz))>::pre_upgrade().is_err()); + assert!(<(Foo, (Bar, Baz))>::post_upgrade(vec![]).is_err()); }); } diff --git a/frame/support/src/traits/messages.rs b/frame/support/src/traits/messages.rs index 781da3ed6c704..36fa7957dff7c 100644 --- a/frame/support/src/traits/messages.rs +++ b/frame/support/src/traits/messages.rs @@ -59,6 +59,7 @@ pub trait ProcessMessage { message: &[u8], origin: Self::Origin, meter: &mut WeightMeter, + id: &mut [u8; 32], ) -> Result; } @@ -68,8 +69,18 @@ pub trait ProcessMessage { pub 
enum ExecuteOverweightError { /// The referenced message was not found. NotFound, + /// The message was already processed. + /// + /// This can be treated as success condition. + AlreadyProcessed, /// The available weight was insufficient to execute the message. InsufficientWeight, + /// The queue is paused and no message can be executed from it. + /// + /// This can change at any time and may resolve in the future by re-trying. + QueuePaused, + /// An unspecified error. + Other, } /// Can service queues and execute overweight messages. @@ -219,3 +230,15 @@ where E::footprint(O::get()) } } + +/// Provides information on paused queues. +pub trait QueuePausedQuery { + /// Whether this queue is paused. + fn is_paused(origin: &Origin) -> bool; +} + +impl QueuePausedQuery for () { + fn is_paused(_: &Origin) -> bool { + false + } +} diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs index 54d264ec65b67..85d8f9a5a74e0 100644 --- a/frame/support/src/traits/metadata.rs +++ b/frame/support/src/traits/metadata.rs @@ -20,7 +20,7 @@ use codec::{Decode, Encode}; use impl_trait_for_tuples::impl_for_tuples; use sp_runtime::RuntimeDebug; -use sp_std::prelude::*; +use sp_std::{ops::Add, prelude::*}; /// Provides information about the pallet itself and its setup in the runtime. /// @@ -232,6 +232,14 @@ impl PartialOrd for StorageVersion { } } +impl Add for StorageVersion { + type Output = StorageVersion; + + fn add(self, rhs: u16) -> Self::Output { + Self::new(self.0 + rhs) + } +} + /// Special marker struct if no storage version is set for a pallet. 
/// /// If you (the reader) end up here, it probably means that you tried to compare diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index a6f8c46d63951..85eb7b47e26da 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -893,7 +893,8 @@ pub trait ExtrinsicCall: sp_runtime::traits::Extrinsic { #[cfg(feature = "std")] impl ExtrinsicCall for sp_runtime::testing::TestXt where - Call: codec::Codec + Sync + Send, + Call: codec::Codec + Sync + Send + TypeInfo, + Extra: TypeInfo, { fn call(&self) -> &Self::Call { &self.call @@ -903,7 +904,10 @@ where impl ExtrinsicCall for sp_runtime::generic::UncheckedExtrinsic where - Extra: sp_runtime::traits::SignedExtension, + Address: TypeInfo, + Call: TypeInfo, + Signature: TypeInfo, + Extra: sp_runtime::traits::SignedExtension + TypeInfo, { fn call(&self) -> &Self::Call { &self.function diff --git a/frame/support/src/traits/storage.rs b/frame/support/src/traits/storage.rs index c3394185a7743..829cd31e4c370 100644 --- a/frame/support/src/traits/storage.rs +++ b/frame/support/src/traits/storage.rs @@ -20,6 +20,7 @@ use crate::sp_std::collections::btree_set::BTreeSet; use impl_trait_for_tuples::impl_for_tuples; pub use sp_core::storage::TrackedStorageKey; +use sp_runtime::traits::Saturating; use sp_std::prelude::*; /// An instance of a pallet in the storage. @@ -36,6 +37,12 @@ pub trait Instance: 'static { const INDEX: u8; } +// Dummy implementation for `()`. +impl Instance for () { + const PREFIX: &'static str = ""; + const INDEX: u8 = 0; +} + /// An instance of a storage in a pallet. /// /// Define an instance for an individual storage inside a pallet. @@ -120,3 +127,42 @@ impl WhitelistedStorageKeys for Tuple { combined_keys.into_iter().collect::>() } } + +macro_rules! 
impl_incrementable { + ($($type:ty),+) => { + $( + impl Incrementable for $type { + fn increment(&self) -> Option { + let mut val = self.clone(); + val.saturating_inc(); + Some(val) + } + + fn initial_value() -> Option { + Some(0) + } + } + )+ + }; +} + +/// A trait representing an incrementable type. +/// +/// The `increment` and `initial_value` functions are fallible. +/// They should either both return `Some` with a valid value, or `None`. +pub trait Incrementable +where + Self: Sized, +{ + /// Increments the value. + /// + /// Returns `Some` with the incremented value if it is possible, or `None` if it is not. + fn increment(&self) -> Option; + + /// Returns the initial value. + /// + /// Returns `Some` with the initial value if it is available, or `None` if it is not. + fn initial_value() -> Option; +} + +impl_incrementable!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); diff --git a/frame/support/src/traits/tokens/currency.rs b/frame/support/src/traits/tokens/currency.rs index 3f6f6b8e7384b..e6a7284a74b7f 100644 --- a/frame/support/src/traits/tokens/currency.rs +++ b/frame/support/src/traits/tokens/currency.rs @@ -25,9 +25,7 @@ use crate::{ dispatch::{DispatchError, DispatchResult}, traits::Get, }; -use codec::MaxEncodedLen; -use sp_runtime::{traits::MaybeSerializeDeserialize, FixedPointOperand}; -use sp_std::fmt::Debug; +use sp_runtime::traits::MaybeSerializeDeserialize; mod reservable; pub use reservable::{NamedReservableCurrency, ReservableCurrency}; @@ -37,7 +35,7 @@ pub use lockable::{LockIdentifier, LockableCurrency, VestingSchedule}; /// Abstraction over a fungible assets system. pub trait Currency { /// The balance of an account. - type Balance: Balance + MaybeSerializeDeserialize + Debug + MaxEncodedLen + FixedPointOperand; + type Balance: Balance + MaybeSerializeDeserialize; /// The opaque token type for an imbalance. This is returned by unbalanced operations /// and must be dealt with. It may be dropped but cannot be cloned. 
@@ -124,8 +122,7 @@ pub trait Currency { /// Transfer some liquid free balance to another staker. /// - /// This is a very high-level function. It will ensure all appropriate fees are paid - /// and no imbalance in the system remains. + /// This is a very high-level function. It will ensure no imbalance in the system remains. fn transfer( source: &AccountId, dest: &AccountId, diff --git a/frame/support/src/traits/tokens/currency/reservable.rs b/frame/support/src/traits/tokens/currency/reservable.rs index aa097a756d4df..79129cecdd696 100644 --- a/frame/support/src/traits/tokens/currency/reservable.rs +++ b/frame/support/src/traits/tokens/currency/reservable.rs @@ -35,7 +35,8 @@ pub trait ReservableCurrency: Currency { /// Deducts up to `value` from reserved balance of `who`. This function cannot fail. /// /// As much funds up to `value` will be deducted as possible. If the reserve balance of `who` - /// is less than `value`, then a non-zero second item will be returned. + /// is less than `value`, then the second item will be equal to the value not able to be + /// slashed. fn slash_reserved( who: &AccountId, value: Self::Balance, @@ -47,9 +48,6 @@ pub trait ReservableCurrency: Currency { /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens /// that are still 'owned' by the account holder, but which are suspendable. /// - /// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account' - /// is deleted: specifically, `ReservedBalance`. - /// /// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. fn reserved_balance(who: &AccountId) -> Self::Balance; @@ -63,13 +61,8 @@ pub trait ReservableCurrency: Currency { /// Moves up to `value` from reserved balance to free balance. This function cannot fail. /// /// As much funds up to `value` will be moved as possible. 
If the reserve balance of `who` - /// is less than `value`, then the remaining amount will be returned. - /// - /// # NOTES - /// - /// - This is different from `reserve`. - /// - If the remaining reserved balance is less than `ExistentialDeposit`, it will - /// invoke `on_reserved_too_low` and could reap the account. + /// is less than `value`, then the remaining amount will be returned. This is different + /// behavior than `reserve`. fn unreserve(who: &AccountId, value: Self::Balance) -> Self::Balance; /// Moves up to `value` from reserved balance of account `slashed` to balance of account diff --git a/frame/support/src/traits/tokens/fungible/hold.rs b/frame/support/src/traits/tokens/fungible/hold.rs index ddcb8c6ac1da8..2605d1797ed2e 100644 --- a/frame/support/src/traits/tokens/fungible/hold.rs +++ b/frame/support/src/traits/tokens/fungible/hold.rs @@ -99,7 +99,7 @@ pub trait Inspect: super::Inspect { /// Check to see if some `amount` of funds of `who` may be placed on hold for the given /// `reason`. Reasons why this may not be true: /// - /// - The implementor supports only a limited number of concurrernt holds on an account which is + /// - The implementor supports only a limited number of concurrent holds on an account which is /// the possible values of `reason`; /// - The main balance of the account is less than `amount`; /// - Removing `amount` from the main balance would kill the account and remove the only @@ -118,7 +118,7 @@ pub trait Inspect: super::Inspect { /// **WARNING** /// Do not use this directly unless you want trouble, since it allows you to alter account balances /// without keeping the issuance up to date. It has no safeguards against accidentally creating -/// token imbalances in your system leading to accidental imflation or deflation. It's really just +/// token imbalances in your system leading to accidental inflation or deflation. 
It's really just /// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to /// use. pub trait Unbalanced: Inspect { @@ -131,7 +131,7 @@ pub trait Unbalanced: Inspect { /// invariants such as any Existential Deposits needed or overflows/underflows. /// If this cannot be done for some reason (e.g. because the account doesn't exist) then an /// `Err` is returned. - // Implmentation note: This should increment the consumer refs if it moves total on hold from + // Implementation note: This should increment the consumer refs if it moves total on hold from // zero to non-zero and decrement in the opposite direction. // // Since this was not done in the previous logic, this will need either a migration or a diff --git a/frame/support/src/traits/tokens/fungible/regular.rs b/frame/support/src/traits/tokens/fungible/regular.rs index 3476549464032..2838bed540aa2 100644 --- a/frame/support/src/traits/tokens/fungible/regular.rs +++ b/frame/support/src/traits/tokens/fungible/regular.rs @@ -58,7 +58,8 @@ pub trait Inspect: Sized { /// The minimum balance any single account may have. fn minimum_balance() -> Self::Balance; - /// Get the total amount of funds whose ultimate bneficial ownership can be determined as `who`. + /// Get the total amount of funds whose ultimate beneficial ownership can be determined as + /// `who`. /// /// This may include funds which are wholly inaccessible to `who`, either temporarily or even /// indefinitely. @@ -108,7 +109,7 @@ pub trait Inspect: Sized { /// Special dust type which can be type-safely converted into a `Credit`. #[must_use] -pub struct Dust>(pub(crate) T::Balance); +pub struct Dust>(pub T::Balance); impl> Dust { /// Convert `Dust` into an instance of `Credit`. 
diff --git a/frame/support/src/traits/tokens/fungibles/hold.rs b/frame/support/src/traits/tokens/fungibles/hold.rs index 68580ebff4bce..2adc00bb4d05a 100644 --- a/frame/support/src/traits/tokens/fungibles/hold.rs +++ b/frame/support/src/traits/tokens/fungibles/hold.rs @@ -98,7 +98,7 @@ pub trait Inspect: super::Inspect { who: &AccountId, amount: Self::Balance, ) -> DispatchResult { - ensure!(Self::hold_available(asset, reason, who), TokenError::CannotCreateHold); + ensure!(Self::hold_available(asset.clone(), reason, who), TokenError::CannotCreateHold); ensure!( amount <= Self::reducible_balance(asset, who, Protect, Force), TokenError::FundsUnavailable @@ -133,7 +133,7 @@ pub trait Inspect: super::Inspect { /// **WARNING** /// Do not use this directly unless you want trouble, since it allows you to alter account balances /// without keeping the issuance up to date. It has no safeguards against accidentally creating -/// token imbalances in your system leading to accidental imflation or deflation. It's really just +/// token imbalances in your system leading to accidental inflation or deflation. It's really just /// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to /// use. pub trait Unbalanced: Inspect { @@ -146,7 +146,7 @@ pub trait Unbalanced: Inspect { /// invariants such as any Existential Deposits needed or overflows/underflows. /// If this cannot be done for some reason (e.g. because the account doesn't exist) then an /// `Err` is returned. - // Implmentation note: This should increment the consumer refs if it moves total on hold from + // Implementation note: This should increment the consumer refs if it moves total on hold from // zero to non-zero and decrement in the opposite direction. 
// // Since this was not done in the previous logic, this will need either a migration or a @@ -173,7 +173,7 @@ pub trait Unbalanced: Inspect { mut amount: Self::Balance, precision: Precision, ) -> Result { - let old_balance = Self::balance_on_hold(asset, reason, who); + let old_balance = Self::balance_on_hold(asset.clone(), reason, who); if let BestEffort = precision { amount = amount.min(old_balance); } @@ -193,7 +193,7 @@ pub trait Unbalanced: Inspect { amount: Self::Balance, precision: Precision, ) -> Result { - let old_balance = Self::balance_on_hold(asset, reason, who); + let old_balance = Self::balance_on_hold(asset.clone(), reason, who); let new_balance = if let BestEffort = precision { old_balance.saturating_add(amount) } else { @@ -221,11 +221,13 @@ pub trait Balanced: super::Balanced + Unbalanced (Credit, Self::Balance) { - let decrease = Self::decrease_balance_on_hold(asset, reason, who, amount, BestEffort) - .unwrap_or(Default::default()); + let decrease = + Self::decrease_balance_on_hold(asset.clone(), reason, who, amount, BestEffort) + .unwrap_or(Default::default()); let credit = Imbalance::::new( - asset, decrease, + asset.clone(), + decrease, ); Self::done_slash(asset, reason, who, decrease); (credit, amount.saturating_sub(decrease)) @@ -255,10 +257,10 @@ pub trait Mutate: // NOTE: This doesn't change the total balance of the account so there's no need to // check liquidity. - Self::ensure_can_hold(asset, reason, who, amount)?; + Self::ensure_can_hold(asset.clone(), reason, who, amount)?; // Should be infallible now, but we proceed softly anyway. 
- Self::decrease_balance(asset, who, amount, Exact, Protect, Force)?; - Self::increase_balance_on_hold(asset, reason, who, amount, BestEffort)?; + Self::decrease_balance(asset.clone(), who, amount, Exact, Protect, Force)?; + Self::increase_balance_on_hold(asset.clone(), reason, who, amount, BestEffort)?; Self::done_hold(asset, reason, who, amount); Ok(()) } @@ -281,13 +283,16 @@ pub trait Mutate: // We want to make sure we can deposit the amount in advance. If we can't then something is // very wrong. - ensure!(Self::can_deposit(asset, who, amount, Extant) == Success, TokenError::CannotCreate); + ensure!( + Self::can_deposit(asset.clone(), who, amount, Extant) == Success, + TokenError::CannotCreate + ); // Get the amount we can actually take from the hold. This might be less than what we want // if we're only doing a best-effort. - let amount = Self::decrease_balance_on_hold(asset, reason, who, amount, precision)?; + let amount = Self::decrease_balance_on_hold(asset.clone(), reason, who, amount, precision)?; // Increase the main balance by what we took. We always do a best-effort here because we // already checked that we can deposit before. - let actual = Self::increase_balance(asset, who, amount, BestEffort)?; + let actual = Self::increase_balance(asset.clone(), who, amount, BestEffort)?; Self::done_release(asset, reason, who, actual); Ok(actual) } @@ -310,14 +315,17 @@ pub trait Mutate: force: Fortitude, ) -> Result { // We must check total-balance requirements if `!force`. 
- let liquid = Self::reducible_total_balance_on_hold(asset, who, force); + let liquid = Self::reducible_total_balance_on_hold(asset.clone(), who, force); if let BestEffort = precision { amount = amount.min(liquid); } else { ensure!(amount <= liquid, TokenError::Frozen); } - let amount = Self::decrease_balance_on_hold(asset, reason, who, amount, precision)?; - Self::set_total_issuance(asset, Self::total_issuance(asset).saturating_sub(amount)); + let amount = Self::decrease_balance_on_hold(asset.clone(), reason, who, amount, precision)?; + Self::set_total_issuance( + asset.clone(), + Self::total_issuance(asset.clone()).saturating_sub(amount), + ); Self::done_burn_held(asset, reason, who, amount); Ok(amount) } @@ -348,8 +356,8 @@ pub trait Mutate: force: Fortitude, ) -> Result { // We must check total-balance requirements if `!force`. - let have = Self::balance_on_hold(asset, reason, source); - let liquid = Self::reducible_total_balance_on_hold(asset, source, force); + let have = Self::balance_on_hold(asset.clone(), reason, source); + let liquid = Self::reducible_total_balance_on_hold(asset.clone(), source, force); if let BestEffort = precision { amount = amount.min(liquid).min(have); } else { @@ -360,19 +368,20 @@ pub trait Mutate: // We want to make sure we can deposit the amount in advance. If we can't then something is // very wrong. ensure!( - Self::can_deposit(asset, dest, amount, Extant) == Success, + Self::can_deposit(asset.clone(), dest, amount, Extant) == Success, TokenError::CannotCreate ); ensure!( - mode == Free || Self::hold_available(asset, reason, dest), + mode == Free || Self::hold_available(asset.clone(), reason, dest), TokenError::CannotCreateHold ); - let amount = Self::decrease_balance_on_hold(asset, reason, source, amount, precision)?; + let amount = + Self::decrease_balance_on_hold(asset.clone(), reason, source, amount, precision)?; let actual = if mode == OnHold { - Self::increase_balance_on_hold(asset, reason, dest, amount, precision)? 
+ Self::increase_balance_on_hold(asset.clone(), reason, dest, amount, precision)? } else { - Self::increase_balance(asset, dest, amount, precision)? + Self::increase_balance(asset.clone(), dest, amount, precision)? }; Self::done_transfer_on_hold(asset, reason, source, dest, actual); Ok(actual) @@ -405,14 +414,14 @@ pub trait Mutate: expendability: Preservation, force: Fortitude, ) -> Result { - ensure!(Self::hold_available(asset, reason, dest), TokenError::CannotCreateHold); + ensure!(Self::hold_available(asset.clone(), reason, dest), TokenError::CannotCreateHold); ensure!( - Self::can_deposit(asset, dest, amount, Extant) == Success, + Self::can_deposit(asset.clone(), dest, amount, Extant) == Success, TokenError::CannotCreate ); let actual = - Self::decrease_balance(asset, source, amount, precision, expendability, force)?; - Self::increase_balance_on_hold(asset, reason, dest, actual, precision)?; + Self::decrease_balance(asset.clone(), source, amount, precision, expendability, force)?; + Self::increase_balance_on_hold(asset.clone(), reason, dest, actual, precision)?; Self::done_transfer_on_hold(asset, reason, source, dest, actual); Ok(actual) } diff --git a/frame/support/src/traits/tokens/fungibles/imbalance.rs b/frame/support/src/traits/tokens/fungibles/imbalance.rs index ab18eec3811ff..1668268ea2dcf 100644 --- a/frame/support/src/traits/tokens/fungibles/imbalance.rs +++ b/frame/support/src/traits/tokens/fungibles/imbalance.rs @@ -59,7 +59,7 @@ impl< { fn drop(&mut self) { if !self.amount.is_zero() { - OnDrop::handle(self.asset, self.amount) + OnDrop::handle(self.asset.clone(), self.amount) } } } @@ -104,9 +104,9 @@ impl< pub fn split(self, amount: B) -> (Self, Self) { let first = self.amount.min(amount); let second = self.amount - first; - let asset = self.asset; + let asset = self.asset.clone(); sp_std::mem::forget(self); - (Imbalance::new(asset, first), Imbalance::new(asset, second)) + (Imbalance::new(asset.clone(), first), Imbalance::new(asset, second)) } pub 
fn merge(mut self, other: Self) -> Result { if self.asset == other.asset { @@ -135,7 +135,7 @@ impl< > { if self.asset == other.asset { let (a, b) = (self.amount, other.amount); - let asset = self.asset; + let asset = self.asset.clone(); sp_std::mem::forget((self, other)); if a == b { @@ -154,7 +154,7 @@ impl< } pub fn asset(&self) -> A { - self.asset + self.asset.clone() } } diff --git a/frame/support/src/traits/tokens/fungibles/metadata.rs b/frame/support/src/traits/tokens/fungibles/metadata.rs index 64f8bf094fb0e..ab310119e5846 100644 --- a/frame/support/src/traits/tokens/fungibles/metadata.rs +++ b/frame/support/src/traits/tokens/fungibles/metadata.rs @@ -39,3 +39,8 @@ pub trait Mutate: Inspect { decimals: u8, ) -> DispatchResult; } + +pub trait MetadataDeposit { + // Returns the required deposit amount for a given metadata. + fn calc_metadata_deposit(name: &[u8], symbol: &[u8]) -> DepositBalance; +} diff --git a/frame/support/src/traits/tokens/fungibles/regular.rs b/frame/support/src/traits/tokens/fungibles/regular.rs index 27d1a50b34805..b6cea15284d39 100644 --- a/frame/support/src/traits/tokens/fungibles/regular.rs +++ b/frame/support/src/traits/tokens/fungibles/regular.rs @@ -62,7 +62,8 @@ pub trait Inspect: Sized { /// The minimum balance any single account may have. fn minimum_balance(asset: Self::AssetId) -> Self::Balance; - /// Get the total amount of funds whose ultimate bneficial ownership can be determined as `who`. + /// Get the total amount of funds whose ultimate beneficial ownership can be determined as + /// `who`. /// /// This may include funds which are wholly inaccessible to `who`, either temporarily or even /// indefinitely. @@ -121,7 +122,7 @@ pub trait Inspect: Sized { /// Special dust type which can be type-safely converted into a `Credit`. #[must_use] -pub struct Dust>(pub(crate) T::AssetId, pub(crate) T::Balance); +pub struct Dust>(pub T::AssetId, pub T::Balance); impl> Dust { /// Convert `Dust` into an instance of `Credit`. 
@@ -135,7 +136,7 @@ impl> Dust { /// **WARNING** /// Do not use this directly unless you want trouble, since it allows you to alter account balances /// without keeping the issuance up to date. It has no safeguards against accidentally creating -/// token imbalances in your system leading to accidental imflation or deflation. It's really just +/// token imbalances in your system leading to accidental inflation or deflation. It's really just /// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to /// use. pub trait Unbalanced: Inspect { @@ -146,7 +147,7 @@ pub trait Unbalanced: Inspect { /// This should not be reimplemented. fn handle_raw_dust(asset: Self::AssetId, amount: Self::Balance) { Self::handle_dust(Dust( - asset, + asset.clone(), amount.min(Self::minimum_balance(asset).saturating_sub(One::one())), )) } @@ -193,13 +194,13 @@ pub trait Unbalanced: Inspect { preservation: Preservation, force: Fortitude, ) -> Result { - let old_balance = Self::balance(asset, who); - let free = Self::reducible_balance(asset, who, preservation, force); + let old_balance = Self::balance(asset.clone(), who); + let free = Self::reducible_balance(asset.clone(), who, preservation, force); if let BestEffort = precision { amount = amount.min(free); } let new_balance = old_balance.checked_sub(&amount).ok_or(TokenError::FundsUnavailable)?; - if let Some(dust) = Self::write_balance(asset, who, new_balance)? { + if let Some(dust) = Self::write_balance(asset.clone(), who, new_balance)? { Self::handle_dust(Dust(asset, dust)); } Ok(old_balance.saturating_sub(new_balance)) @@ -217,13 +218,13 @@ pub trait Unbalanced: Inspect { amount: Self::Balance, precision: Precision, ) -> Result { - let old_balance = Self::balance(asset, who); + let old_balance = Self::balance(asset.clone(), who); let new_balance = if let BestEffort = precision { old_balance.saturating_add(amount) } else { old_balance.checked_add(&amount).ok_or(ArithmeticError::Overflow)? 
}; - if new_balance < Self::minimum_balance(asset) { + if new_balance < Self::minimum_balance(asset.clone()) { // Attempt to increase from 0 to below minimum -> stays at zero. if let BestEffort = precision { Ok(Self::Balance::default()) @@ -234,7 +235,7 @@ pub trait Unbalanced: Inspect { if new_balance == old_balance { Ok(Self::Balance::default()) } else { - if let Some(dust) = Self::write_balance(asset, who, new_balance)? { + if let Some(dust) = Self::write_balance(asset.clone(), who, new_balance)? { Self::handle_dust(Dust(asset, dust)); } Ok(new_balance.saturating_sub(old_balance)) @@ -258,11 +259,14 @@ pub trait Mutate: Inspect + Unbalanced { who: &AccountId, amount: Self::Balance, ) -> Result { - Self::total_issuance(asset) + Self::total_issuance(asset.clone()) .checked_add(&amount) .ok_or(ArithmeticError::Overflow)?; - let actual = Self::increase_balance(asset, who, amount, Exact)?; - Self::set_total_issuance(asset, Self::total_issuance(asset).saturating_add(actual)); + let actual = Self::increase_balance(asset.clone(), who, amount, Exact)?; + Self::set_total_issuance( + asset.clone(), + Self::total_issuance(asset.clone()).saturating_add(actual), + ); Self::done_mint_into(asset, who, amount); Ok(actual) } @@ -277,13 +281,17 @@ pub trait Mutate: Inspect + Unbalanced { precision: Precision, force: Fortitude, ) -> Result { - let actual = Self::reducible_balance(asset, who, Expendable, force).min(amount); + let actual = Self::reducible_balance(asset.clone(), who, Expendable, force).min(amount); ensure!(actual == amount || precision == BestEffort, TokenError::FundsUnavailable); - Self::total_issuance(asset) + Self::total_issuance(asset.clone()) .checked_sub(&actual) .ok_or(ArithmeticError::Overflow)?; - let actual = Self::decrease_balance(asset, who, actual, BestEffort, Expendable, force)?; - Self::set_total_issuance(asset, Self::total_issuance(asset).saturating_sub(actual)); + let actual = + Self::decrease_balance(asset.clone(), who, actual, BestEffort, 
Expendable, force)?; + Self::set_total_issuance( + asset.clone(), + Self::total_issuance(asset.clone()).saturating_sub(actual), + ); Self::done_burn_from(asset, who, actual); Ok(actual) } @@ -303,13 +311,17 @@ pub trait Mutate: Inspect + Unbalanced { who: &AccountId, amount: Self::Balance, ) -> Result { - let actual = Self::reducible_balance(asset, who, Expendable, Polite).min(amount); + let actual = Self::reducible_balance(asset.clone(), who, Expendable, Polite).min(amount); ensure!(actual == amount, TokenError::FundsUnavailable); - Self::total_issuance(asset) + Self::total_issuance(asset.clone()) .checked_sub(&actual) .ok_or(ArithmeticError::Overflow)?; - let actual = Self::decrease_balance(asset, who, actual, BestEffort, Expendable, Polite)?; - Self::set_total_issuance(asset, Self::total_issuance(asset).saturating_sub(actual)); + let actual = + Self::decrease_balance(asset.clone(), who, actual, BestEffort, Expendable, Polite)?; + Self::set_total_issuance( + asset.clone(), + Self::total_issuance(asset.clone()).saturating_sub(actual), + ); Self::done_shelve(asset, who, actual); Ok(actual) } @@ -329,11 +341,14 @@ pub trait Mutate: Inspect + Unbalanced { who: &AccountId, amount: Self::Balance, ) -> Result { - Self::total_issuance(asset) + Self::total_issuance(asset.clone()) .checked_add(&amount) .ok_or(ArithmeticError::Overflow)?; - let actual = Self::increase_balance(asset, who, amount, Exact)?; - Self::set_total_issuance(asset, Self::total_issuance(asset).saturating_add(actual)); + let actual = Self::increase_balance(asset.clone(), who, amount, Exact)?; + Self::set_total_issuance( + asset.clone(), + Self::total_issuance(asset.clone()).saturating_add(actual), + ); Self::done_restore(asset, who, amount); Ok(actual) } @@ -346,13 +361,13 @@ pub trait Mutate: Inspect + Unbalanced { amount: Self::Balance, preservation: Preservation, ) -> Result { - let _extra = - Self::can_withdraw(asset, source, amount).into_result(preservation != Expendable)?; - 
Self::can_deposit(asset, dest, amount, Extant).into_result()?; - Self::decrease_balance(asset, source, amount, BestEffort, preservation, Polite)?; + let _extra = Self::can_withdraw(asset.clone(), source, amount) + .into_result(preservation != Expendable)?; + Self::can_deposit(asset.clone(), dest, amount, Extant).into_result()?; + Self::decrease_balance(asset.clone(), source, amount, BestEffort, preservation, Polite)?; // This should never fail as we checked `can_deposit` earlier. But we do a best-effort // anyway. - let _ = Self::increase_balance(asset, dest, amount, BestEffort); + let _ = Self::increase_balance(asset.clone(), dest, amount, BestEffort); Self::done_transfer(asset, source, dest, amount); Ok(amount) } @@ -363,7 +378,7 @@ pub trait Mutate: Inspect + Unbalanced { /// /// Returns the new balance. fn set_balance(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> Self::Balance { - let b = Self::balance(asset, who); + let b = Self::balance(asset.clone(), who); if b > amount { Self::burn_from(asset, who, b - amount, BestEffort, Force).map(|d| b.saturating_sub(d)) } else { @@ -391,7 +406,7 @@ impl> HandleImbalanceDrop { fn handle(asset: U::AssetId, amount: U::Balance) { - U::set_total_issuance(asset, U::total_issuance(asset).saturating_add(amount)) + U::set_total_issuance(asset.clone(), U::total_issuance(asset).saturating_add(amount)) } } @@ -402,7 +417,7 @@ impl> HandleImbalanceDrop { fn handle(asset: U::AssetId, amount: U::Balance) { - U::set_total_issuance(asset, U::total_issuance(asset).saturating_sub(amount)) + U::set_total_issuance(asset.clone(), U::total_issuance(asset).saturating_sub(amount)) } } @@ -423,11 +438,11 @@ pub trait Balanced: Inspect + Unbalanced { /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example /// in the case of underflow. 
fn rescind(asset: Self::AssetId, amount: Self::Balance) -> Debt { - let old = Self::total_issuance(asset); + let old = Self::total_issuance(asset.clone()); let new = old.saturating_sub(amount); - Self::set_total_issuance(asset, new); + Self::set_total_issuance(asset.clone(), new); let delta = old - new; - Self::done_rescind(asset, delta); + Self::done_rescind(asset.clone(), delta); Imbalance::::new( asset, delta, ) @@ -440,11 +455,11 @@ pub trait Balanced: Inspect + Unbalanced { /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example /// in the case of overflow. fn issue(asset: Self::AssetId, amount: Self::Balance) -> Credit { - let old = Self::total_issuance(asset); + let old = Self::total_issuance(asset.clone()); let new = old.saturating_add(amount); - Self::set_total_issuance(asset, new); + Self::set_total_issuance(asset.clone(), new); let delta = new - old; - Self::done_issue(asset, delta); + Self::done_issue(asset.clone(), delta); Imbalance::::new( asset, delta, ) @@ -458,7 +473,7 @@ pub trait Balanced: Inspect + Unbalanced { asset: Self::AssetId, amount: Self::Balance, ) -> (Debt, Credit) { - (Self::rescind(asset, amount), Self::issue(asset, amount)) + (Self::rescind(asset.clone(), amount), Self::issue(asset, amount)) } /// Mints `value` into the account of `who`, creating it as needed. 
@@ -476,8 +491,8 @@ pub trait Balanced: Inspect + Unbalanced { value: Self::Balance, precision: Precision, ) -> Result, DispatchError> { - let increase = Self::increase_balance(asset, who, value, precision)?; - Self::done_deposit(asset, who, increase); + let increase = Self::increase_balance(asset.clone(), who, value, precision)?; + Self::done_deposit(asset.clone(), who, increase); Ok(Imbalance::::new( asset, increase, )) @@ -504,8 +519,9 @@ pub trait Balanced: Inspect + Unbalanced { preservation: Preservation, force: Fortitude, ) -> Result, DispatchError> { - let decrease = Self::decrease_balance(asset, who, value, precision, preservation, force)?; - Self::done_withdraw(asset, who, decrease); + let decrease = + Self::decrease_balance(asset.clone(), who, value, precision, preservation, force)?; + Self::done_withdraw(asset.clone(), who, decrease); Ok(Imbalance::::new( asset, decrease, )) @@ -545,7 +561,7 @@ pub trait Balanced: Inspect + Unbalanced { ) -> Result, Debt> { let amount = debt.peek(); let asset = debt.asset(); - let credit = match Self::withdraw(asset, who, amount, Exact, preservation, Polite) { + let credit = match Self::withdraw(asset.clone(), who, amount, Exact, preservation, Polite) { Err(_) => return Err(debt), Ok(d) => d, }; diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs index 0ba900e95f9b8..baf3fd5f35464 100644 --- a/frame/support/src/traits/tokens/misc.rs +++ b/frame/support/src/traits/tokens/misc.rs @@ -221,10 +221,10 @@ impl WithdrawReasons { /// Simple amalgamation trait to collect together properties for an AssetId under one roof. 
pub trait AssetId: - FullCodec + Copy + Eq + PartialEq + Debug + scale_info::TypeInfo + MaxEncodedLen + FullCodec + Clone + Eq + PartialEq + Debug + scale_info::TypeInfo + MaxEncodedLen { } -impl AssetId +impl AssetId for T { } @@ -262,8 +262,8 @@ pub trait ConversionFromAssetBalance { ) -> Result; } -/// Trait to handle asset locking mechanism to ensure interactions with the asset can be implemented -/// downstream to extend logic of Uniques current functionality. +/// Trait to handle NFT locking mechanism to ensure interactions with the asset can be implemented +/// downstream to extend logic of Uniques/Nfts current functionality. pub trait Locker { /// Check if the asset should be locked and prevent interactions with the asset from executing. fn is_locked(collection: CollectionId, item: ItemId) -> bool; diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs index c23bf3e4055b1..c4463e0070f9a 100644 --- a/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -173,10 +173,18 @@ pub trait Mutate: Inspect { } } -/// Trait for transferring a non-fungible item. +/// Trait for transferring and controlling the transfer of non-fungible sets of items. pub trait Transfer: Inspect { /// Transfer `item` into `destination` account. fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult; + /// Disable the `item` of `collection` transfer. + /// + /// By default, this is not a supported operation. + fn disable_transfer(item: &Self::ItemId) -> DispatchResult; + /// Re-enable the `item` of `collection` transfer. + /// + /// By default, this is not a supported operation. 
+ fn enable_transfer(item: &Self::ItemId) -> DispatchResult; } /// Convert a `nonfungibles` trait implementation into a `nonfungible` trait implementation by @@ -312,4 +320,10 @@ impl< fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult { >::transfer(&A::get(), item, destination) } + fn disable_transfer(item: &Self::ItemId) -> DispatchResult { + >::disable_transfer(&A::get(), item) + } + fn enable_transfer(item: &Self::ItemId) -> DispatchResult { + >::enable_transfer(&A::get(), item) + } } diff --git a/frame/support/src/traits/tokens/nonfungibles_v2.rs b/frame/support/src/traits/tokens/nonfungibles_v2.rs index 9d32f29becd4c..345cce237b67b 100644 --- a/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -180,6 +180,16 @@ pub trait InspectEnumerable: Inspect { ) -> Self::OwnedInCollectionIterator; } +/// Trait for providing an interface to check the account's role within the collection. +pub trait InspectRole: Inspect { + /// Returns `true` if `who` is the issuer of the `collection`. + fn is_issuer(collection: &Self::CollectionId, who: &AccountId) -> bool; + /// Returns `true` if `who` is the admin of the `collection`. + fn is_admin(collection: &Self::CollectionId, who: &AccountId) -> bool; + /// Returns `true` if `who` is the freezer of the `collection`. + fn is_freezer(collection: &Self::CollectionId, who: &AccountId) -> bool; +} + /// Trait for providing the ability to create collections of nonfungible items. pub trait Create: Inspect { /// Create a `collection` of nonfungible items to be owned by `who` and managed by `admin`. @@ -188,6 +198,13 @@ pub trait Create: Inspect { admin: &AccountId, config: &CollectionConfig, ) -> Result; + + fn create_collection_with_id( + collection: Self::CollectionId, + who: &AccountId, + admin: &AccountId, + config: &CollectionConfig, + ) -> Result<(), DispatchError>; } /// Trait for providing the ability to destroy collections of nonfungible items. 
@@ -338,4 +355,18 @@ pub trait Transfer: Inspect { item: &Self::ItemId, destination: &AccountId, ) -> DispatchResult; + + /// Disable the `item` of `collection` transfer. + /// + /// By default, this is not a supported operation. + fn disable_transfer(_collection: &Self::CollectionId, _item: &Self::ItemId) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Re-enable the `item` of `collection` transfer. + /// + /// By default, this is not a supported operation. + fn enable_transfer(_collection: &Self::CollectionId, _item: &Self::ItemId) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } } diff --git a/frame/support/src/traits/tokens/pay.rs b/frame/support/src/traits/tokens/pay.rs index 23bd113bfef56..78f8e7b873480 100644 --- a/frame/support/src/traits/tokens/pay.rs +++ b/frame/support/src/traits/tokens/pay.rs @@ -20,6 +20,7 @@ use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; use scale_info::TypeInfo; use sp_core::{RuntimeDebug, TypedGet}; +use sp_runtime::DispatchError; use sp_std::fmt::Debug; use super::{fungible, Balance, Preservation::Expendable}; @@ -38,13 +39,15 @@ pub trait Pay { type AssetKind; /// An identifier given to an individual payment. type Id: FullCodec + MaxEncodedLen + TypeInfo + Clone + Eq + PartialEq + Debug + Copy; + /// An error which could be returned by the Pay type + type Error: Debug; /// Make a payment and return an identifier for later evaluation of success in some off-chain /// mechanism (likely an event, but possibly not on this chain). fn pay( who: &Self::Beneficiary, asset_kind: Self::AssetKind, amount: Self::Balance, - ) -> Result; + ) -> Result; /// Check how a payment has proceeded. `id` must have been previously returned by `pay` for /// the result of this call to be meaningful. 
Once this returns anything other than /// `InProgress` for some `id` it must return `Unknown` rather than the actual result @@ -53,7 +56,11 @@ pub trait Pay { /// Ensure that a call to pay with the given parameters will be successful if done immediately /// after this call. Used in benchmarking code. #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful(who: &Self::Beneficiary, amount: Self::Balance); + fn ensure_successful( + who: &Self::Beneficiary, + asset_kind: Self::AssetKind, + amount: Self::Balance, + ); /// Ensure that a call to `check_payment` with the given parameters will return either `Success` /// or `Failure`. #[cfg(feature = "runtime-benchmarks")] @@ -81,19 +88,20 @@ impl> Pay for PayFromAccount { type Beneficiary = A::Type; type AssetKind = (); type Id = (); + type Error = DispatchError; fn pay( who: &Self::Beneficiary, _: Self::AssetKind, amount: Self::Balance, - ) -> Result { - >::transfer(&A::get(), who, amount, Expendable).map_err(|_| ())?; + ) -> Result { + >::transfer(&A::get(), who, amount, Expendable)?; Ok(()) } fn check_payment(_: ()) -> PaymentStatus { PaymentStatus::Success } #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful(_: &Self::Beneficiary, amount: Self::Balance) { + fn ensure_successful(_: &Self::Beneficiary, _: Self::AssetKind, amount: Self::Balance) { >::mint_into(&A::get(), amount).unwrap(); } #[cfg(feature = "runtime-benchmarks")] diff --git a/frame/support/src/traits/try_runtime.rs b/frame/support/src/traits/try_runtime.rs index bebc248721c99..cb18f9d5b71c0 100644 --- a/frame/support/src/traits/try_runtime.rs +++ b/frame/support/src/traits/try_runtime.rs @@ -19,6 +19,7 @@ use impl_trait_for_tuples::impl_for_tuples; use sp_arithmetic::traits::AtLeast32BitUnsigned; +use sp_runtime::TryRuntimeError; use sp_std::prelude::*; /// Which state tests to execute. @@ -129,7 +130,7 @@ impl core::str::FromStr for UpgradeCheckSelect { /// This hook should not alter any storage. 
pub trait TryState { /// Execute the state checks. - fn try_state(_: BlockNumber, _: Select) -> Result<(), &'static str>; + fn try_state(_: BlockNumber, _: Select) -> Result<(), TryRuntimeError>; } #[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] @@ -139,7 +140,7 @@ impl TryState Result<(), &'static str> { + fn try_state(n: BlockNumber, targets: Select) -> Result<(), TryRuntimeError> { match targets { Select::None => Ok(()), Select::All => { @@ -148,7 +149,7 @@ impl TryState { - let functions: &[fn(BlockNumber, Select) -> Result<(), &'static str>] = + let functions: &[fn(BlockNumber, Select) -> Result<(), TryRuntimeError>] = &[for_tuples!(#( Tuple::try_state ),*)]; let skip = n.clone() % (functions.len() as u32).into(); let skip: u32 = @@ -163,7 +164,7 @@ impl TryState { let try_state_fns: &[( &'static str, - fn(BlockNumber, Select) -> Result<(), &'static str>, + fn(BlockNumber, Select) -> Result<(), TryRuntimeError>, )] = &[for_tuples!( #( (::name(), Tuple::try_state) ),* )]; diff --git a/frame/support/src/traits/voting.rs b/frame/support/src/traits/voting.rs index caec472785782..4201b8d48d157 100644 --- a/frame/support/src/traits/voting.rs +++ b/frame/support/src/traits/voting.rs @@ -20,81 +20,10 @@ use crate::dispatch::{DispatchError, Parameter}; use codec::{HasCompact, MaxEncodedLen}; -use sp_arithmetic::{ - traits::{SaturatedConversion, UniqueSaturatedFrom, UniqueSaturatedInto}, - Perbill, -}; +use sp_arithmetic::Perbill; use sp_runtime::traits::Member; use sp_std::prelude::*; -/// A trait similar to `Convert` to convert values from `B` an abstract balance type -/// into u64 and back from u128. (This conversion is used in election and other places where complex -/// calculation over balance type is needed) -/// -/// Total issuance of the currency is passed in, but an implementation of this trait may or may not -/// use it. 
-/// -/// # WARNING -/// -/// the total issuance being passed in implies that the implementation must be aware of the fact -/// that its values can affect the outcome. This implies that if the vote value is dependent on the -/// total issuance, it should never ber written to storage for later re-use. -pub trait CurrencyToVote { - /// Convert balance to u64. - fn to_vote(value: B, issuance: B) -> u64; - - /// Convert u128 to balance. - fn to_currency(value: u128, issuance: B) -> B; -} - -/// An implementation of `CurrencyToVote` tailored for chain's that have a balance type of u128. -/// -/// The factor is the `(total_issuance / u64::MAX).max(1)`, represented as u64. Let's look at the -/// important cases: -/// -/// If the chain's total issuance is less than u64::MAX, this will always be 1, which means that -/// the factor will not have any effect. In this case, any account's balance is also less. Thus, -/// both of the conversions are basically an `as`; Any balance can fit in u64. -/// -/// If the chain's total issuance is more than 2*u64::MAX, then a factor might be multiplied and -/// divided upon conversion. -pub struct U128CurrencyToVote; - -impl U128CurrencyToVote { - fn factor(issuance: u128) -> u128 { - (issuance / u64::MAX as u128).max(1) - } -} - -impl CurrencyToVote for U128CurrencyToVote { - fn to_vote(value: u128, issuance: u128) -> u64 { - (value / Self::factor(issuance)).saturated_into() - } - - fn to_currency(value: u128, issuance: u128) -> u128 { - value.saturating_mul(Self::factor(issuance)) - } -} - -/// A naive implementation of `CurrencyConvert` that simply saturates all conversions. -/// -/// # Warning -/// -/// This is designed to be used mostly for testing. Use with care, and think about the consequences. 
-pub struct SaturatingCurrencyToVote; - -impl + UniqueSaturatedFrom> CurrencyToVote - for SaturatingCurrencyToVote -{ - fn to_vote(value: B, _: B) -> u64 { - value.unique_saturated_into() - } - - fn to_currency(value: u128, _: B) -> B { - B::unique_saturated_from(value) - } -} - pub trait VoteTally { fn new(_: Class) -> Self; fn ayes(&self, class: Class) -> Votes; diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 75eba8fbe9883..7941f2a89a550 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -22,7 +22,6 @@ mod extrinsic_weights; mod paritydb_weights; mod rocksdb_weights; -use crate::dispatch; pub use sp_weights::*; /// These constants are specific to FRAME, and the current implementation of its various components. @@ -38,70 +37,3 @@ pub mod constants { paritydb_weights::constants::ParityDbWeight, rocksdb_weights::constants::RocksDbWeight, }; } - -#[deprecated = "Function has moved to `frame_support::dispatch`"] -pub fn extract_actual_pays_fee( - res: &dispatch::DispatchResultWithPostInfo, - info: &dispatch::DispatchInfo, -) -> dispatch::Pays { - dispatch::extract_actual_pays_fee(res, info) -} -#[deprecated = "Function has moved to `frame_support::dispatch`"] -pub fn extract_actual_weight( - res: &dispatch::DispatchResultWithPostInfo, - info: &dispatch::DispatchInfo, -) -> Weight { - dispatch::extract_actual_weight(res, info) -} -#[deprecated = "Trait has moved to `frame_support::dispatch`"] -pub trait ClassifyDispatch: dispatch::ClassifyDispatch { - fn classify_dispatch(&self, target: T) -> dispatch::DispatchClass { - >::classify_dispatch(self, target) - } -} -#[deprecated = "Enum has moved to `frame_support::dispatch`"] -pub type DispatchClass = dispatch::DispatchClass; -#[deprecated = "Struct has moved to `frame_support::dispatch`"] -pub type DispatchInfo = dispatch::DispatchInfo; -#[deprecated = "Trait has moved to `frame_support::dispatch`"] -pub trait GetDispatchInfo: dispatch::GetDispatchInfo { - fn 
get_dispatch_info(&self) -> dispatch::DispatchInfo { - ::get_dispatch_info(self) - } -} -#[deprecated = "Trait has moved to `frame_support::dispatch`"] -pub trait OneOrMany: dispatch::OneOrMany { - fn into_iter(self) -> Self::Iter - where - Self: Sized, - { - >::into_iter(self) - } -} -#[deprecated = "Enum has moved to `frame_support::dispatch`"] -pub type Pays = dispatch::Pays; -#[deprecated = "Trait has moved to `frame_support::dispatch`"] -pub trait PaysFee: dispatch::PaysFee { - fn pays_fee(&self, target: T) -> dispatch::Pays { - >::pays_fee(self, target) - } -} -#[deprecated = "Struct has moved to `frame_support::dispatch`"] -pub type PerDispatchClass = dispatch::PerDispatchClass; -#[deprecated = "Struct has moved to `frame_support::dispatch`"] -pub type PostDispatchInfo = dispatch::PostDispatchInfo; -#[deprecated = "Trait has moved to `frame_support::dispatch`"] -pub trait WeighData: dispatch::WeighData { - fn weigh_data(&self, target: T) -> Weight { - >::weigh_data(self, target) - } -} -#[deprecated = "Trait has moved to `frame_support::dispatch`"] -pub trait WithPostDispatchInfo: dispatch::WithPostDispatchInfo { - fn with_weight(self, actual_weight: Weight) -> dispatch::DispatchErrorWithPostInfo - where - Self: Sized, - { - ::with_weight(self, actual_weight) - } -} diff --git a/frame/support/src/weights/block_weights.rs b/frame/support/src/weights/block_weights.rs index b358aa473bfd7..57a68554755ab 100644 --- a/frame/support/src/weights/block_weights.rs +++ b/frame/support/src/weights/block_weights.rs @@ -16,8 +16,8 @@ // limitations under the License. //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06 (Y/M/D) -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2023-06-16 (Y/M/D) +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Development` //! 
WARMUPS: `10`, REPEAT: `100` @@ -44,17 +44,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. /// /// Stats nanoseconds: - /// Min, Max: 386_820, 419_676 - /// Average: 392_184 - /// Median: 389_668 - /// Std-Dev: 5285.57 + /// Min, Max: 376_949, 622_462 + /// Average: 390_584 + /// Median: 386_322 + /// Std-Dev: 24792.0 /// /// Percentiles nanoseconds: - /// 99th: 406_316 - /// 95th: 399_697 - /// 75th: 396_532 + /// 99th: 433_299 + /// 95th: 402_688 + /// 75th: 391_645 pub const BlockExecutionWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(392_184), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(390_584), 0); } #[cfg(test)] diff --git a/frame/support/src/weights/extrinsic_weights.rs b/frame/support/src/weights/extrinsic_weights.rs index 1a6facc3d42c3..a304f089ff782 100644 --- a/frame/support/src/weights/extrinsic_weights.rs +++ b/frame/support/src/weights/extrinsic_weights.rs @@ -16,8 +16,8 @@ // limitations under the License. //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06 (Y/M/D) -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2023-06-16 (Y/M/D) +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` @@ -44,17 +44,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
/// /// Stats nanoseconds: - /// Min, Max: 113_246, 114_346 - /// Average: 113_638 - /// Median: 113_641 - /// Std-Dev: 188.44 + /// Min, Max: 123_875, 128_419 + /// Average: 124_414 + /// Median: 124_332 + /// Std-Dev: 497.74 /// /// Percentiles nanoseconds: - /// 99th: 114_181 - /// 95th: 113_961 - /// 75th: 113_703 + /// 99th: 125_245 + /// 95th: 124_989 + /// 75th: 124_498 pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(113_638), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(124_414), 0); } #[cfg(test)] diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 68411210f9b5c..af4d3afd4da98 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -12,19 +12,20 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.136", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +static_assertions = "1.1.0" +serde = { version = "1.0.163", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../../../primitives/arithmetic" } -sp-io = { version = "7.0.0", path = "../../../primitives/io", default-features = false } -sp-state-machine = { version = "0.13.0", optional = true, path = "../../../primitives/state-machine" } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../../../primitives/arithmetic" } +sp-io = { version = "23.0.0", path = "../../../primitives/io", default-features = false } 
+sp-state-machine = { version = "0.28.0", optional = true, path = "../../../primitives/state-machine" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../benchmarking" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } -sp-version = { version = "5.0.0", default-features = false, path = "../../../primitives/version" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } +sp-version = { version = "22.0.0", default-features = false, path = "../../../primitives/version" } trybuild = { version = "1.0.74", features = [ "diff" ] } pretty_assertions = "1.3.0" rustversion = "1.0.6" @@ -36,26 +37,29 @@ test-pallet = { package = "frame-support-test-pallet", default-features = false, [features] default = ["std"] std = [ - "serde/std", "codec/std", - "scale-info/std", "frame-benchmarking/std", "frame-executive/std", "frame-support/std", "frame-system/std", + "scale-info/std", + "serde/std", + "sp-api/std", + "sp-arithmetic/std", "sp-core/std", - "sp-std/std", "sp-io/std", "sp-runtime/std", - "sp-state-machine", - "sp-arithmetic/std", + "sp-std/std", "sp-version/std", - "sp-api/std", + "test-pallet/std", + "sp-state-machine/std" ] +experimental = ["frame-support/experimental"] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "frame-executive/try-runtime", + "sp-runtime/try-runtime" ] # WARNING: # Only CI runs with this feature enabled. 
This feature is for testing stuff related to the FRAME macros diff --git a/frame/support/test/compile_pass/Cargo.toml b/frame/support/test/compile_pass/Cargo.toml index 353847b4b2b7c..5d6caa002b372 100644 --- a/frame/support/test/compile_pass/Cargo.toml +++ b/frame/support/test/compile_pass/Cargo.toml @@ -12,13 +12,13 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } renamed-frame-support = { package = "frame-support", version = "4.0.0-dev", default-features = false, path = "../../" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../../../primitives/core" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../../primitives/runtime" } -sp-version = { version = "5.0.0", default-features = false, path = "../../../../primitives/version" } +sp-core = { version = "21.0.0", default-features = false, path = "../../../../primitives/core" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../../primitives/runtime" } +sp-version = { version = "22.0.0", default-features = false, path = "../../../../primitives/version" } [features] default = ["std"] diff --git a/frame/support/test/compile_pass/src/lib.rs b/frame/support/test/compile_pass/src/lib.rs index 4eaa657b1e486..bf90d73acb320 100644 --- a/frame/support/test/compile_pass/src/lib.rs +++ b/frame/support/test/compile_pass/src/lib.rs @@ -45,7 +45,6 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { pub type Signature = sr25519::Signature; pub type AccountId = ::Signer; pub type BlockNumber = 
u64; -pub type Index = u64; parameter_types! { pub const Version: RuntimeVersion = VERSION; @@ -55,16 +54,15 @@ impl frame_system::Config for Runtime { type BaseCallFilter = Everything; type BlockWeights = (); type BlockLength = (); - type Index = u128; + type Nonce = u128; type Hash = H256; type Hashing = BlakeTwo256; - type Header = Header; + type Block = Block; type Lookup = IdentityLookup; type BlockHashCount = ConstU64<2400>; type Version = Version; type AccountData = (); type RuntimeOrigin = RuntimeOrigin; - type BlockNumber = BlockNumber; type AccountId = AccountId; type RuntimeEvent = RuntimeEvent; type PalletInfo = PalletInfo; @@ -83,12 +81,7 @@ pub type Block = generic::Block; pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; construct_runtime!( - pub struct Runtime - where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { + pub struct Runtime { System: frame_system, } ); diff --git a/frame/support/test/pallet/Cargo.toml b/frame/support/test/pallet/Cargo.toml index 135d0e64b8ff4..6edfcd78968d1 100644 --- a/frame/support/test/pallet/Cargo.toml +++ b/frame/support/test/pallet/Cargo.toml @@ -12,10 +12,12 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.163", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../system" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../../primitives/runtime" } [features] default = ["std"] @@ -24,4 
+26,6 @@ std = [ "frame-support/std", "frame-system/std", "scale-info/std", + "serde/std", + "sp-runtime/std" ] diff --git a/frame/support/test/pallet/src/lib.rs b/frame/support/test/pallet/src/lib.rs index f2d0fd7b80d8d..49450659285d7 100644 --- a/frame/support/test/pallet/src/lib.rs +++ b/frame/support/test/pallet/src/lib.rs @@ -37,11 +37,14 @@ pub mod pallet { pub type Value = StorageValue<_, u32>; #[pallet::genesis_config] - #[derive(Default)] - pub struct GenesisConfig {} + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + #[serde(skip)] + _config: core::marker::PhantomData, + } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) {} } diff --git a/frame/support/test/src/lib.rs b/frame/support/test/src/lib.rs index 5dccc88471a7b..6b38d42d33d0d 100644 --- a/frame/support/test/src/lib.rs +++ b/frame/support/test/src/lib.rs @@ -22,6 +22,7 @@ #![deny(warnings)] pub use frame_support::dispatch::RawOrigin; +use frame_system::pallet_prelude::BlockNumberFor; pub use self::pallet::*; @@ -34,7 +35,7 @@ pub mod pallet { #[pallet::pallet] pub struct Pallet(_); - /// The configuration trait + /// The configuration trait. #[pallet::config] #[pallet::disable_frame_system_supertrait_check] pub trait Config: 'static + Eq + Clone { @@ -126,12 +127,12 @@ pub mod pallet_prelude { /// tests! 
pub struct TestRandomness(sp_std::marker::PhantomData); -impl frame_support::traits::Randomness - for TestRandomness +impl + frame_support::traits::Randomness> for TestRandomness where T: frame_system::Config, { - fn random(subject: &[u8]) -> (Output, T::BlockNumber) { + fn random(subject: &[u8]) -> (Output, BlockNumberFor) { use sp_runtime::traits::TrailingZeroInput; ( diff --git a/frame/support/test/tests/common/mod.rs b/frame/support/test/tests/common/mod.rs new file mode 100644 index 0000000000000..b02ecc1b6e1dd --- /dev/null +++ b/frame/support/test/tests/common/mod.rs @@ -0,0 +1,19 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Common functionality between tests. +pub mod outer_enums; diff --git a/frame/support/test/tests/common/outer_enums.rs b/frame/support/test/tests/common/outer_enums.rs new file mode 100644 index 0000000000000..92dc7ac522079 --- /dev/null +++ b/frame/support/test/tests/common/outer_enums.rs @@ -0,0 +1,146 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Create 3 pallets for testing the outer error enum construction: +// +// - `pallet`: declares an error with `#[pallet::error]` +// - `pallet2`: declares an error with `#[pallet::error]` +// - `pallet3`: does not declare an error. + +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + } + + #[pallet::event] + pub enum Event, I: 'static = ()> { + /// Something + Something(u32), + } + + #[pallet::pallet] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + phantom: PhantomData<(T, I)>, + } + + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { phantom: Default::default() } + } + } + + #[pallet::genesis_build] + impl, I: 'static> BuildGenesisConfig for GenesisConfig { + fn build(&self) {} + } + + #[pallet::error] + #[derive(PartialEq, Eq)] + pub enum Error { + /// doc comment put into metadata + InsufficientProposersBalance, + NonExistentStorageValue, + } +} + +#[frame_support::pallet] +pub mod pallet2 { + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + } + + #[pallet::event] + pub enum Event, I: 'static = ()> { + /// Something + Something(u32), + } + + #[pallet::pallet] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 
'static = ()> { + phantom: PhantomData<(T, I)>, + } + + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { phantom: Default::default() } + } + } + + #[pallet::genesis_build] + impl, I: 'static> BuildGenesisConfig for GenesisConfig { + fn build(&self) {} + } + + #[pallet::error] + #[derive(PartialEq, Eq)] + pub enum Error { + /// doc comment put into metadata + OtherInsufficientProposersBalance, + OtherNonExistentStorageValue, + } +} + +#[frame_support::pallet] +pub mod pallet3 { + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + } + + #[pallet::event] + pub enum Event, I: 'static = ()> { + /// Something + Something(u32), + } + + #[pallet::pallet] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + phantom: PhantomData<(T, I)>, + } + + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { phantom: Default::default() } + } + } + + #[pallet::genesis_build] + impl, I: 'static> BuildGenesisConfig for GenesisConfig { + fn build(&self) {} + } +} diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 85e790095eb31..ff207ddf977c6 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -22,9 +22,13 @@ #![recursion_limit = "128"] use codec::MaxEncodedLen; -use frame_support::{parameter_types, traits::PalletInfo as _}; +use frame_support::{ + derive_impl, parameter_types, traits::PalletInfo as _, weights::RuntimeDbWeight, +}; +use frame_system::limits::{BlockLength, BlockWeights}; use scale_info::TypeInfo; -use sp_core::sr25519; +use sp_api::RuntimeVersion; +use sp_core::{sr25519, ConstU64}; use sp_runtime::{ generic, traits::{BlakeTwo256, Verify}, @@ -37,9 +41,8 @@ parameter_types! 
{ #[frame_support::pallet(dev_mode)] mod module1 { - use self::frame_system::pallet_prelude::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; + use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(_); @@ -75,10 +78,9 @@ mod module1 { #[frame_support::pallet(dev_mode)] mod module2 { - use self::frame_system::pallet_prelude::*; use super::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; + use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(_); @@ -122,10 +124,9 @@ mod nested { #[frame_support::pallet(dev_mode)] pub mod module3 { - use self::frame_system::pallet_prelude::*; use super::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; + use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(_); @@ -165,11 +166,14 @@ mod nested { } #[pallet::genesis_config] - #[derive(Default)] - pub struct GenesisConfig {} + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, + } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) {} } } @@ -177,10 +181,9 @@ mod nested { #[frame_support::pallet(dev_mode)] pub mod module3 { - use self::frame_system::pallet_prelude::*; use super::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; + use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(_); @@ -234,11 +237,14 @@ pub mod module3 { } #[pallet::genesis_config] - #[derive(Default)] - pub struct GenesisConfig {} + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, + } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) {} } } @@ -250,20 +256,15 @@ pub type Header = generic::Header; 
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; pub type Block = generic::Block; -use frame_support_test as system; - frame_support::construct_runtime!( - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { - System: system::{Pallet, Call, Event, Origin} = 30, + System: frame_system::{Pallet, Call, Event, Origin} = 30, Module1_1: module1::::{Pallet, Call, Storage, Event, Origin}, Module2: module2::{Pallet, Call, Storage, Event, Origin}, Module1_2: module1::::{Pallet, Call, Storage, Event, Origin}, - NestedModule3: nested::module3::{Pallet, Call, Config, Storage, Event, Origin}, - Module3: self::module3::{Pallet, Call, Config, Storage, Event, Origin}, + NestedModule3: nested::module3::{Pallet, Call, Config, Storage, Event, Origin}, + Module3: self::module3::{Pallet, Call, Config, Storage, Event, Origin}, Module1_3: module1::::{Pallet, Storage, Event } = 6, Module1_4: module1::::{Pallet, Call, Event } = 3, Module1_5: module1::::{Pallet, Event}, @@ -274,15 +275,18 @@ frame_support::construct_runtime!( } ); -impl frame_support_test::Config for Runtime { - type BlockNumber = BlockNumber; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { type AccountId = AccountId; + type Lookup = sp_runtime::traits::IdentityLookup; type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; type PalletInfo = PalletInfo; - type DbWeight = (); + type OnSetCode = (); + type Block = Block; + type BlockHashCount = ConstU64<10>; } impl module1::Config for Runtime { @@ -330,7 +334,7 @@ fn test_pub() -> AccountId { fn check_modules_error_type() { sp_io::TestExternalities::default().execute_with(|| { assert_eq!( - Module1_1::fail(system::Origin::::Root.into()), + Module1_1::fail(frame_system::Origin::::Root.into()), 
Err(DispatchError::Module(ModuleError { index: 31, error: [0; 4], @@ -338,7 +342,7 @@ fn check_modules_error_type() { })), ); assert_eq!( - Module2::fail(system::Origin::::Root.into()), + Module2::fail(frame_system::Origin::::Root.into()), Err(DispatchError::Module(ModuleError { index: 32, error: [0; 4], @@ -346,7 +350,7 @@ fn check_modules_error_type() { })), ); assert_eq!( - Module1_2::fail(system::Origin::::Root.into()), + Module1_2::fail(frame_system::Origin::::Root.into()), Err(DispatchError::Module(ModuleError { index: 33, error: [0; 4], @@ -354,7 +358,7 @@ fn check_modules_error_type() { })), ); assert_eq!( - NestedModule3::fail(system::Origin::::Root.into()), + NestedModule3::fail(frame_system::Origin::::Root.into()), Err(DispatchError::Module(ModuleError { index: 34, error: [0; 4], @@ -362,7 +366,7 @@ fn check_modules_error_type() { })), ); assert_eq!( - Module1_3::fail(system::Origin::::Root.into()), + Module1_3::fail(frame_system::Origin::::Root.into()), Err(DispatchError::Module(ModuleError { index: 6, error: [0; 4], @@ -370,7 +374,7 @@ fn check_modules_error_type() { })), ); assert_eq!( - Module1_4::fail(system::Origin::::Root.into()), + Module1_4::fail(frame_system::Origin::::Root.into()), Err(DispatchError::Module(ModuleError { index: 3, error: [0; 4], @@ -378,7 +382,7 @@ fn check_modules_error_type() { })), ); assert_eq!( - Module1_5::fail(system::Origin::::Root.into()), + Module1_5::fail(frame_system::Origin::::Root.into()), Err(DispatchError::Module(ModuleError { index: 4, error: [0; 4], @@ -386,7 +390,7 @@ fn check_modules_error_type() { })), ); assert_eq!( - Module1_6::fail(system::Origin::::Root.into()), + Module1_6::fail(frame_system::Origin::::Root.into()), Err(DispatchError::Module(ModuleError { index: 1, error: [0; 4], @@ -394,7 +398,7 @@ fn check_modules_error_type() { })), ); assert_eq!( - Module1_7::fail(system::Origin::::Root.into()), + Module1_7::fail(frame_system::Origin::::Root.into()), Err(DispatchError::Module(ModuleError { index: 
2, error: [0; 4], @@ -402,7 +406,7 @@ fn check_modules_error_type() { })), ); assert_eq!( - Module1_8::fail(system::Origin::::Root.into()), + Module1_8::fail(frame_system::Origin::::Root.into()), Err(DispatchError::Module(ModuleError { index: 12, error: [0; 4], @@ -410,7 +414,7 @@ fn check_modules_error_type() { })), ); assert_eq!( - Module1_9::fail(system::Origin::::Root.into()), + Module1_9::fail(frame_system::Origin::::Root.into()), Err(DispatchError::Module(ModuleError { index: 13, error: [0; 4], @@ -430,7 +434,7 @@ fn integrity_test_works() { fn origin_codec() { use codec::Encode; - let origin = OriginCaller::system(system::RawOrigin::None); + let origin = OriginCaller::system(frame_system::RawOrigin::None); assert_eq!(origin.encode()[0], 30); let origin = OriginCaller::Module1_1(module1::Origin(Default::default())); @@ -465,7 +469,8 @@ fn origin_codec() { fn event_codec() { use codec::Encode; - let event = system::Event::::ExtrinsicSuccess; + let event = + frame_system::Event::::ExtrinsicSuccess { dispatch_info: Default::default() }; assert_eq!(RuntimeEvent::from(event).encode()[0], 30); let event = module1::Event::::A(test_pub()); @@ -502,7 +507,7 @@ fn event_codec() { #[test] fn call_codec() { use codec::Encode; - assert_eq!(RuntimeCall::System(system::Call::noop {}).encode()[0], 30); + assert_eq!(RuntimeCall::System(frame_system::Call::remark { remark: vec![1] }).encode()[0], 30); assert_eq!(RuntimeCall::Module1_1(module1::Call::fail {}).encode()[0], 31); assert_eq!(RuntimeCall::Module2(module2::Call::fail {}).encode()[0], 32); assert_eq!(RuntimeCall::Module1_2(module1::Call::fail {}).encode()[0], 33); @@ -631,17 +636,69 @@ fn call_subtype_conversion() { #[test] fn test_metadata() { - use frame_support::metadata::*; + use frame_support::metadata::{v14::*, *}; use scale_info::meta_type; + use sp_core::Encode; + + fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { + if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + doc + } + } let 
pallets = vec![ PalletMetadata { name: "System", storage: None, - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![ + PalletConstantMetadata { + name: "BlockWeights", + ty: meta_type::(), + value: BlockWeights::default().encode(), + docs: maybe_docs(vec![" Block & extrinsics weights: base values and limits."]), + }, + PalletConstantMetadata { + name: "BlockLength", + ty: meta_type::(), + value: BlockLength::default().encode(), + docs: maybe_docs(vec![" The maximum length of a block (in bytes)."]), + }, + PalletConstantMetadata { + name: "BlockHashCount", + ty: meta_type::(), + value: 10u64.encode(), + docs: maybe_docs(vec![" Maximum number of block number to block hash mappings to keep (oldest pruned first)."]), + }, + PalletConstantMetadata { + name: "DbWeight", + ty: meta_type::(), + value: RuntimeDbWeight::default().encode(), + docs: maybe_docs(vec![" The weight of runtime database operations the runtime can invoke.",]), + }, + PalletConstantMetadata { + name: "Version", + ty: meta_type::(), + value: RuntimeVersion::default().encode(), + docs: maybe_docs(vec![ " Get the chain's current version."]), + }, + PalletConstantMetadata { + name: "SS58Prefix", + ty: meta_type::(), + value: 0u16.encode(), + docs: maybe_docs(vec![ + " The designated SS58 prefix of this chain.", + "", + " This replaces the \"ss58Format\" property declared in the chain spec. 
Reason is", + " that the runtime should know about the prefix in order to make use of it as", + " an identifier of the chain.", + ]), + }, + ], + error: Some(meta_type::>().into()), index: 30, }, PalletMetadata { @@ -775,7 +832,7 @@ fn test_metadata() { fn pallet_in_runtime_is_correct() { assert_eq!(PalletInfo::index::().unwrap(), 30); assert_eq!(PalletInfo::name::().unwrap(), "System"); - assert_eq!(PalletInfo::module_name::().unwrap(), "system"); + assert_eq!(PalletInfo::module_name::().unwrap(), "frame_system"); assert!(PalletInfo::crate_version::().is_some()); assert_eq!(PalletInfo::index::().unwrap(), 31); diff --git a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs index ea468d6de13ee..4cb249714650e 100644 --- a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs +++ b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs @@ -20,12 +20,9 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic}, + System: system::{Pallet, Call, Storage, Config, Event}, Pallet: pallet exclude_parts { Pallet } use_parts { Pallet }, } } diff --git a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr index b1c1879aa56ad..1ea62b7d6fd65 100644 --- a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr +++ b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr @@ -1,7 +1,7 @@ error: Unexpected tokens, expected one of `=`, `,` - --> tests/construct_runtime_ui/both_use_and_excluded_parts.rs:29:43 + --> tests/construct_runtime_ui/both_use_and_excluded_parts.rs:26:43 | -29 | Pallet: pallet exclude_parts { Pallet } use_parts { Pallet }, +26 | Pallet: pallet exclude_parts { Pallet } use_parts { Pallet }, | ^^^^^^^^^ error[E0412]: cannot find type `RuntimeCall` in 
this scope diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs index dd8340daa0233..513fbcfb51354 100644 --- a/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs @@ -1,10 +1,7 @@ use frame_support::construct_runtime; construct_runtime! { - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { System: system::{Pallet}, Balance: balances::{Pallet}, diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr index 27c5644e0d736..6fb983f03a961 100644 --- a/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr @@ -1,11 +1,11 @@ error: Two pallets with the same name! - --> $DIR/conflicting_module_name.rs:10:3 - | -10 | Balance: balances::{Pallet}, - | ^^^^^^^ + --> tests/construct_runtime_ui/conflicting_module_name.rs:7:3 + | +7 | Balance: balances::{Pallet}, + | ^^^^^^^ error: Two pallets with the same name! - --> $DIR/conflicting_module_name.rs:11:3 - | -11 | Balance: balances::{Pallet}, - | ^^^^^^^ + --> tests/construct_runtime_ui/conflicting_module_name.rs:8:3 + | +8 | Balance: balances::{Pallet}, + | ^^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.rs b/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.rs new file mode 100644 index 0000000000000..c0e325085b5e5 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.rs @@ -0,0 +1,13 @@ +use frame_support::construct_runtime; + +construct_runtime! 
{ + pub struct Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = Uxt, + { + System: frame_system::{Pallet, Call, Storage, Config, Event}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr new file mode 100644 index 0000000000000..946277e9068e3 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -0,0 +1,442 @@ +error: use of deprecated constant `WhereSection::_w`: + It is deprecated to use a `where` clause in `construct_runtime`. + Please instead use `frame_system::Config` to set the `Block` type and delete this clause. + It is planned to be removed in December 2023. + + For more info see: + + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | / construct_runtime! { +4 | | pub struct Runtime where +5 | | Block = Block, +6 | | NodeBlock = Block, +... | +10 | | } +11 | | } + | |_^ + | + = note: `-D deprecated` implied by `-D warnings` + = note: this error originates in the macro `frame_support::match_and_insert` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... 
| + | +note: required by a bound in `frame_system::Event` + --> $WORKSPACE/frame/system/src/lib.rs + | + | pub enum Event { + | ^^^^^^ required by this bound in `Event` + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | + | +note: required because it appears within the type `RuntimeEvent` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | +note: required by a bound in `Clone` + --> $RUST/core/src/clone.rs + | + | pub trait Clone: Sized { + | ^^^^^ required by this bound in `Clone` + = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | + | +note: required because it appears within the type `RuntimeEvent` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... 
|| +10 | || } +11 | || } + | ||_- in this macro invocation +... | +note: required by a bound in `EncodeLike` + --> $CARGO/parity-scale-codec-3.6.1/src/encode_like.rs + | + | pub trait EncodeLike: Sized + Encode {} + | ^^^^^ required by this bound in `EncodeLike` + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | + | +note: required because it appears within the type `RuntimeEvent` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | +note: required by a bound in `Decode` + --> $CARGO/parity-scale-codec-3.6.1/src/codec.rs + | + | pub trait Decode: Sized { + | ^^^^^ required by this bound in `Decode` + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_system::Event` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... 
| + | + = note: required because it appears within the type `Event` +note: required by a bound in `From` + --> $RUST/core/src/convert/mod.rs + | + | pub trait From: Sized { + | ^ required by this bound in `From` + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_system::Event` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | + | + = note: required because it appears within the type `Event` +note: required by a bound in `TryInto` + --> $RUST/core/src/convert/mod.rs + | + | pub trait TryInto: Sized { + | ^ required by this bound in `TryInto` + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | + | + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | construct_runtime! 
{ + | ^ the trait `Config` is not implemented for `Runtime` + | + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `RawOrigin<_>: TryFrom` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | + | + = help: the trait `TryFrom` is implemented for `RawOrigin<::AccountId>` + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | + | + = help: the trait `Callable` is implemented for `Pallet` + = note: required for `Pallet` to implement `Callable` + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... 
| + | + = note: required for `Pallet` to implement `Callable` +note: required because it appears within the type `RuntimeCall` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | +note: required by a bound in `Clone` + --> $RUST/core/src/clone.rs + | + | pub trait Clone: Sized { + | ^^^^^ required by this bound in `Clone` + = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | + | + = note: required for `Pallet` to implement `Callable` +note: required because it appears within the type `RuntimeCall` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | +note: required by a bound in `EncodeLike` + --> $CARGO/parity-scale-codec-3.6.1/src/encode_like.rs + | + | pub trait EncodeLike: Sized + Encode {} + | ^^^^^ required by this bound in `EncodeLike` + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! 
{ +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | + | + = note: required for `Pallet` to implement `Callable` +note: required because it appears within the type `RuntimeCall` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | +note: required by a bound in `Decode` + --> $CARGO/parity-scale-codec-3.6.1/src/codec.rs + | + | pub trait Decode: Sized { + | ^^^^^ required by this bound in `Decode` + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:9:3 + | +9 | System: frame_system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ the trait `Config` is not implemented for `Runtime` + | +note: required by a bound in `frame_system::GenesisConfig` + --> $WORKSPACE/frame/system/src/lib.rs + | + | pub struct GenesisConfig { + | ^^^^^^ required by this bound in `GenesisConfig` + +error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | + | +note: required because it appears within the type `RuntimeEvent` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... 
|| +10 | || } +11 | || } + | ||_- in this macro invocation +... | +note: required by a bound in `Result` + --> $RUST/core/src/result.rs + | + | pub enum Result { + | ^ required by this bound in `Result` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::codec::Decode` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | + | +note: required because it appears within the type `RuntimeEvent` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | +note: required by a bound in `TryInto` + --> $RUST/core/src/convert/mod.rs + | + | pub trait TryInto: Sized { + | ^^^^^ required by this bound in `TryInto` + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... 
| + | + = note: required for `Pallet` to implement `Callable` +note: required because it appears within the type `RuntimeCall` + --> tests/construct_runtime_ui/deprecated_where_block.rs:3:1 + | +3 | // construct_runtime! { +4 | || pub struct Runtime where +5 | || Block = Block, +6 | || NodeBlock = Block, +... || +10 | || } +11 | || } + | ||_- in this macro invocation +... | +note: required by a bound in `Result` + --> $RUST/core/src/result.rs + | + | pub enum Result { + | ^ required by this bound in `Result` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::codec::Decode` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/double_module_parts.rs b/frame/support/test/tests/construct_runtime_ui/double_module_parts.rs index 3269d22434fdf..68a2523d3bcb2 100644 --- a/frame/support/test/tests/construct_runtime_ui/double_module_parts.rs +++ b/frame/support/test/tests/construct_runtime_ui/double_module_parts.rs @@ -1,10 +1,7 @@ use frame_support::construct_runtime; construct_runtime! { - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { System: system::{Pallet}, Balance: balances::{Config, Call, Config, Origin}, diff --git a/frame/support/test/tests/construct_runtime_ui/double_module_parts.stderr b/frame/support/test/tests/construct_runtime_ui/double_module_parts.stderr index 9d10474ce85ab..e3f694781441f 100644 --- a/frame/support/test/tests/construct_runtime_ui/double_module_parts.stderr +++ b/frame/support/test/tests/construct_runtime_ui/double_module_parts.stderr @@ -1,5 +1,5 @@ error: `Config` was already declared before. 
Please remove the duplicate declaration - --> $DIR/double_module_parts.rs:10:37 - | -10 | Balance: balances::{Config, Call, Config, Origin}, - | ^^^^^^ + --> tests/construct_runtime_ui/double_module_parts.rs:7:37 + | +7 | Balance: balances::{Config, Call, Config, Origin}, + | ^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.rs b/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.rs index 44b7fd0ba25f7..23badd76276e2 100644 --- a/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.rs +++ b/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.rs @@ -1,10 +1,7 @@ use frame_support::construct_runtime; construct_runtime! { - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { system: , } diff --git a/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.stderr b/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.stderr index 7102076e5acb0..f0c0f17779d67 100644 --- a/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.stderr +++ b/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.stderr @@ -1,5 +1,5 @@ error: expected one of: `crate`, `self`, `super`, identifier - --> $DIR/empty_pallet_path.rs:9:11 + --> tests/construct_runtime_ui/empty_pallet_path.rs:6:11 | -9 | system: , +6 | system: , | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs index 83a166fc00552..10cda7b4e7e8a 100644 --- a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs @@ -25,12 +25,9 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic}, + System: system::{Pallet, Call, Storage, Config, Event}, Pallet: pallet exclude_parts { Call }, } } diff --git 
a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr index 66098898bb877..4b85613838ab5 100644 --- a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr @@ -1,7 +1,7 @@ error: Invalid pallet part specified, the pallet `Pallet` doesn't have the `Call` part. Available parts are: `Pallet`, `Storage`. - --> tests/construct_runtime_ui/exclude_undefined_part.rs:34:34 + --> tests/construct_runtime_ui/exclude_undefined_part.rs:31:34 | -34 | Pallet: pallet exclude_parts { Call }, +31 | Pallet: pallet exclude_parts { Call }, | ^^^^ error[E0412]: cannot find type `RuntimeCall` in this scope diff --git a/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs index 7ab4329110d8b..35d49a4d8a23b 100644 --- a/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs +++ b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs @@ -1,13 +1,10 @@ use frame_support::construct_runtime; construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { #[cfg(test)] - System: frame_system::{Pallet, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Storage, Config, Event}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr index a86a839615aa0..6a6c4b4158889 100644 --- a/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr +++ b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr @@ -1,5 +1,5 @@ error: `System` pallet declaration is feature gated, please remove any `#[cfg]` attributes - --> tests/construct_runtime_ui/feature_gated_system_pallet.rs:10:3 - | -10 | System: frame_system::{Pallet, Call, Storage, Config, Event}, - | ^^^^^^ + --> tests/construct_runtime_ui/feature_gated_system_pallet.rs:7:3 + | +7 | System: frame_system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.rs b/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.rs index 0912ffc98d54b..1ad1f8e0b1d5f 100644 --- a/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.rs +++ b/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.rs @@ -1,10 +1,7 @@ use frame_support::construct_runtime; construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { System: system::{Pallet}, Balance: balances::::{Call, Origin}, diff --git a/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr b/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr index 06caa036b91ff..a6adb37d04949 100644 --- a/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr +++ b/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr @@ -1,5 +1,5 @@ -error: `Call` is not allowed to have generics. Only the following pallets are allowed to have generics: `Event`, `Origin`, `Config`. - --> $DIR/generics_in_invalid_module.rs:10:36 - | -10 | Balance: balances::::{Call, Origin}, - | ^^^^ +error: `Call` is not allowed to have generics. Only the following pallets are allowed to have generics: `Event`, `Error`, `Origin`, `Config`. + --> tests/construct_runtime_ui/generics_in_invalid_module.rs:7:36 + | +7 | Balance: balances::::{Call, Origin}, + | ^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs index c253444ee31db..bce87c51336eb 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs +++ b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs @@ -1,10 +1,7 @@ use frame_support::construct_runtime; construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { System: system::{Pallet}, #[cfg(feature = 1)] diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr index 68366a3410bf1..bfee2910cd2a4 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr @@ -1,6 +1,6 @@ error: feature = 1 ^ expected one of ``, `all`, `any`, `not` here - --> tests/construct_runtime_ui/invalid_meta_literal.rs:10:3 - | -10 | #[cfg(feature = 1)] - | ^ + --> tests/construct_runtime_ui/invalid_meta_literal.rs:7:3 + | +7 | #[cfg(feature = 1)] + | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_details.rs b/frame/support/test/tests/construct_runtime_ui/invalid_module_details.rs index 9fb3169e1df77..bf6919f5a58ef 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_details.rs +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_details.rs @@ -1,10 +1,7 @@ use frame_support::construct_runtime; construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { system: System::(), } diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_details.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_module_details.stderr index 0a20cf4e39a88..1f9277c3f0a8e 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_details.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_details.stderr @@ -1,5 +1,5 @@ error: Unexpected tokens, expected one of `::$ident` `::{`, `exclude_parts`, `use_parts`, `=`, `,` - --> tests/construct_runtime_ui/invalid_module_details.rs:9:17 + --> tests/construct_runtime_ui/invalid_module_details.rs:6:17 | -9 | system: System::(), +6 | system: System::(), | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.rs b/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.rs index 6ba268b73eea6..51f14e6883e4a 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.rs +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.rs @@ -1,10 +1,7 @@ use frame_support::construct_runtime; construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { system: System::{enum}, } diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr index 42f79e96d4473..dfcc9b8be42c6 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr @@ -1,5 +1,5 @@ -error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned`, `FreezeReason`, `HoldReason`, `LockId`, `SlashReason` - --> $DIR/invalid_module_details_keyword.rs:9:20 +error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Error`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned`, `FreezeReason`, `HoldReason`, `LockId`, `SlashReason` + --> tests/construct_runtime_ui/invalid_module_details_keyword.rs:6:20 | -9 | system: System::{enum}, +6 | system: System::{enum}, | ^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.rs b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.rs index d627ffd5b66f7..607741d7823d4 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.rs +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.rs @@ -1,13 +1,10 @@ use frame_support::construct_runtime; construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { System: system::{Pallet}, - Balance: balances::{Error}, + Balance: balances::{Unexpected}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr index 6d535ca4335fc..9dd849ff0412e 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr @@ -1,5 +1,5 @@ -error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned`, `FreezeReason`, `HoldReason`, `LockId`, `SlashReason` - --> $DIR/invalid_module_entry.rs:10:23 - | -10 | Balance: balances::{Error}, - | ^^^^^ +error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Error`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned`, `FreezeReason`, `HoldReason`, `LockId`, `SlashReason` + --> tests/construct_runtime_ui/invalid_module_entry.rs:7:23 + | +7 | Balance: balances::{Unexpected}, + | ^^^^^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_token_after_module.rs b/frame/support/test/tests/construct_runtime_ui/invalid_token_after_module.rs index 09c316e6ebaed..c132fa01b2297 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_token_after_module.rs +++ b/frame/support/test/tests/construct_runtime_ui/invalid_token_after_module.rs @@ -1,10 +1,7 @@ use frame_support::construct_runtime; construct_runtime! { - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { system: System ? 
} diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_token_after_module.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_token_after_module.stderr index 6025de82bd206..80be1b8dd42fd 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_token_after_module.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_token_after_module.stderr @@ -1,5 +1,5 @@ error: Unexpected tokens, expected one of `::$ident` `::{`, `exclude_parts`, `use_parts`, `=`, `,` - --> $DIR/invalid_token_after_module.rs:9:18 + --> tests/construct_runtime_ui/invalid_token_after_module.rs:6:18 | -9 | system: System ? +6 | system: System ? | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_token_after_name.rs b/frame/support/test/tests/construct_runtime_ui/invalid_token_after_name.rs index 18d367d102d3a..42e7759f87f2b 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_token_after_name.rs +++ b/frame/support/test/tests/construct_runtime_ui/invalid_token_after_name.rs @@ -1,10 +1,7 @@ use frame_support::construct_runtime; construct_runtime! { - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { system ? } diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_token_after_name.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_token_after_name.stderr index eaae082c8460c..8988f8a35b0a4 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_token_after_name.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_token_after_name.stderr @@ -1,5 +1,5 @@ error: expected `:` - --> $DIR/invalid_token_after_name.rs:9:10 + --> tests/construct_runtime_ui/invalid_token_after_name.rs:6:10 | -9 | system ? +6 | system ? 
| ^ diff --git a/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.rs b/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.rs index 3cd2f157d0475..bc2039c4e8180 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.rs +++ b/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.rs @@ -1,13 +1,10 @@ use frame_support::construct_runtime; construct_runtime! { - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { - System: system::{Pallet}, - Balance: balances::::{Event}, + System: system expanded::{}::{Pallet}, + Balance: balances:: expanded::{}::{Event}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.stderr b/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.stderr index b1aa9b86cd0d6..30fcba4c710d0 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.stderr +++ b/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.stderr @@ -1,5 +1,5 @@ error: Instantiable pallet with no generic `Event` cannot be constructed: pallet `Balance` must have generic `Event` - --> $DIR/missing_event_generic_on_module_with_instance.rs:10:3 - | -10 | Balance: balances::::{Event}, - | ^^^^^^^ + --> tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.rs:7:3 + | +7 | Balance: balances:: expanded::{}::{Event}, + | ^^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/missing_module_instance.rs b/frame/support/test/tests/construct_runtime_ui/missing_module_instance.rs index 24e4ee979bd76..afd96a04854f2 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_module_instance.rs +++ 
b/frame/support/test/tests/construct_runtime_ui/missing_module_instance.rs @@ -1,10 +1,7 @@ use frame_support::construct_runtime; construct_runtime! { - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { system: System::<>, } diff --git a/frame/support/test/tests/construct_runtime_ui/missing_module_instance.stderr b/frame/support/test/tests/construct_runtime_ui/missing_module_instance.stderr index 6303c74e42e5c..5072f718db12e 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_module_instance.stderr +++ b/frame/support/test/tests/construct_runtime_ui/missing_module_instance.stderr @@ -1,5 +1,5 @@ error: expected identifier - --> $DIR/missing_module_instance.rs:9:20 + --> tests/construct_runtime_ui/missing_module_instance.rs:6:20 | -9 | system: System::<>, +6 | system: System::<>, | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.rs b/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.rs index 787ba20117678..42db63ae90a3a 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.rs +++ b/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.rs @@ -1,13 +1,10 @@ use frame_support::construct_runtime; construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { - System: system::{Pallet}, - Balance: balances::::{Origin}, + System: system expanded::{}::{Pallet}, + Balance: balances:: expanded::{}::{Origin}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.stderr b/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.stderr index 63bb7442a8576..6c076d7b49fc0 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.stderr +++ b/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.stderr @@ -1,5 +1,5 @@ error: Instantiable pallet with no generic `Origin` cannot be constructed: pallet `Balance` must have generic `Origin` - --> $DIR/missing_origin_generic_on_module_with_instance.rs:10:3 - | -10 | Balance: balances::::{Origin}, - | ^^^^^^^ + --> tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.rs:7:3 + | +7 | Balance: balances:: expanded::{}::{Origin}, + | ^^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/missing_system_module.rs b/frame/support/test/tests/construct_runtime_ui/missing_system_module.rs index 7ab902c3aadd8..685f9059b1be2 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_system_module.rs +++ b/frame/support/test/tests/construct_runtime_ui/missing_system_module.rs @@ -1,10 +1,7 @@ use frame_support::construct_runtime; construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { } } diff --git a/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr b/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr index 7648f5c1bfb33..c8631f44051ca 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr +++ b/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr @@ -1,6 +1,6 @@ -error: `System` pallet declaration is missing. Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},` - --> $DIR/missing_system_module.rs:8:2 +error: `System` pallet declaration is missing. Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},` + --> tests/construct_runtime_ui/missing_system_module.rs:5:2 | -8 | / { -9 | | } +5 | / { +6 | | } | |_____^ diff --git a/frame/support/test/tests/construct_runtime_ui/missing_where_block.rs b/frame/support/test/tests/construct_runtime_ui/missing_where_block.rs deleted file mode 100644 index 303df6b03d72e..0000000000000 --- a/frame/support/test/tests/construct_runtime_ui/missing_where_block.rs +++ /dev/null @@ -1,7 +0,0 @@ -use frame_support::construct_runtime; - -construct_runtime! 
{ - pub struct Runtime {} -} - -fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/missing_where_block.stderr b/frame/support/test/tests/construct_runtime_ui/missing_where_block.stderr deleted file mode 100644 index d2a66f95101f4..0000000000000 --- a/frame/support/test/tests/construct_runtime_ui/missing_where_block.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: expected `where` - --> tests/construct_runtime_ui/missing_where_block.rs:4:21 - | -4 | pub struct Runtime {} - | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr deleted file mode 100644 index afa210c1ae59d..0000000000000 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr +++ /dev/null @@ -1,130 +0,0 @@ -error: `Pallet` does not have the std feature enabled, this will cause the `test_pallet::GenesisConfig` type to not implement serde traits. - --> tests/construct_runtime_ui/no_std_genesis_config.rs:40:1 - | -40 | / construct_runtime! { -41 | | pub struct Runtime where -42 | | Block = Block, -43 | | NodeBlock = Block, -... | -48 | | } -49 | | } - | |_^ - | - = note: this error originates in the macro `test_pallet::__substrate_genesis_config_check::is_std_enabled_for_genesis` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0277]: the trait bound `frame_support_test_pallet::GenesisConfig: Serialize` is not satisfied - --> tests/construct_runtime_ui/no_std_genesis_config.rs:40:1 - | -40 | / construct_runtime! { -41 | | pub struct Runtime where -42 | | Block = Block, -43 | | NodeBlock = Block, -... 
| -48 | | } -49 | | } - | |_^ the trait `Serialize` is not implemented for `frame_support_test_pallet::GenesisConfig` - | - = help: the following other types implement trait `Serialize`: - &'a T - &'a mut T - () - (T0, T1) - (T0, T1, T2) - (T0, T1, T2, T3) - (T0, T1, T2, T3, T4) - (T0, T1, T2, T3, T4, T5) - and $N others -note: required by a bound in `hidden_include::serde::ser::SerializeStruct::serialize_field` - --> $CARGO/serde-1.0.162/src/ser/mod.rs - | - | T: Serialize; - | ^^^^^^^^^ required by this bound in `SerializeStruct::serialize_field` - = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0277]: the trait bound `frame_support_test_pallet::GenesisConfig: Deserialize<'_>` is not satisfied - --> tests/construct_runtime_ui/no_std_genesis_config.rs:47:3 - | -47 | Pallet: test_pallet::{Pallet, Config}, - | ^^^^^^ the trait `Deserialize<'_>` is not implemented for `frame_support_test_pallet::GenesisConfig` - | - = help: the following other types implement trait `Deserialize<'de>`: - <&'a [u8] as Deserialize<'de>> - <&'a std::path::Path as Deserialize<'de>> - <&'a str as Deserialize<'de>> - <() as Deserialize<'de>> - <(T0, T1) as Deserialize<'de>> - <(T0, T1, T2) as Deserialize<'de>> - <(T0, T1, T2, T3) as Deserialize<'de>> - <(T0, T1, T2, T3, T4) as Deserialize<'de>> - and $N others -note: required by a bound in `next_element` - --> $CARGO/serde-1.0.162/src/de/mod.rs - | - | T: Deserialize<'de>, - | ^^^^^^^^^^^^^^^^ required by this bound in `SeqAccess::next_element` - -error[E0277]: the trait bound `frame_support_test_pallet::GenesisConfig: Deserialize<'_>` is not satisfied - --> tests/construct_runtime_ui/no_std_genesis_config.rs:47:3 - | -47 | Pallet: test_pallet::{Pallet, Config}, - | ^^^^^^ the trait `Deserialize<'_>` is not implemented for `frame_support_test_pallet::GenesisConfig` - | - = help: the following other types implement trait `Deserialize<'de>`: - <&'a [u8] as 
Deserialize<'de>> - <&'a std::path::Path as Deserialize<'de>> - <&'a str as Deserialize<'de>> - <() as Deserialize<'de>> - <(T0, T1) as Deserialize<'de>> - <(T0, T1, T2) as Deserialize<'de>> - <(T0, T1, T2, T3) as Deserialize<'de>> - <(T0, T1, T2, T3, T4) as Deserialize<'de>> - and $N others -note: required by a bound in `next_value` - --> $CARGO/serde-1.0.162/src/de/mod.rs - | - | V: Deserialize<'de>, - | ^^^^^^^^^^^^^^^^ required by this bound in `MapAccess::next_value` - -error[E0277]: the trait bound `frame_support_test_pallet::GenesisConfig: Deserialize<'_>` is not satisfied - --> tests/construct_runtime_ui/no_std_genesis_config.rs:40:1 - | -40 | / construct_runtime! { -41 | | pub struct Runtime where -42 | | Block = Block, -43 | | NodeBlock = Block, -... | -48 | | } -49 | | } - | |_^ the trait `Deserialize<'_>` is not implemented for `frame_support_test_pallet::GenesisConfig` - | - = help: the following other types implement trait `Deserialize<'de>`: - <&'a [u8] as Deserialize<'de>> - <&'a std::path::Path as Deserialize<'de>> - <&'a str as Deserialize<'de>> - <() as Deserialize<'de>> - <(T0, T1) as Deserialize<'de>> - <(T0, T1, T2) as Deserialize<'de>> - <(T0, T1, T2, T3) as Deserialize<'de>> - <(T0, T1, T2, T3, T4) as Deserialize<'de>> - and $N others -note: required by a bound in `hidden_include::serde::__private::de::missing_field` - --> $CARGO/serde-1.0.162/src/private/de.rs - | - | V: Deserialize<'de>, - | ^^^^^^^^^^^^^^^^ required by this bound in `missing_field` - = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0277]: the trait bound `frame_support_test_pallet::GenesisConfig: BuildModuleGenesisStorage` is not satisfied - --> tests/construct_runtime_ui/no_std_genesis_config.rs:40:1 - | -40 | / construct_runtime! { -41 | | pub struct Runtime where -42 | | Block = Block, -43 | | NodeBlock = Block, -... 
| -48 | | } -49 | | } - | |_^ the trait `BuildModuleGenesisStorage` is not implemented for `frame_support_test_pallet::GenesisConfig` - | - = help: the trait `BuildModuleGenesisStorage` is implemented for `frame_system::GenesisConfig` - = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs b/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs index 5dfc67c83836a..0d6afbcdc2c65 100644 --- a/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs +++ b/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs @@ -22,14 +22,13 @@ impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); @@ -47,12 +46,9 @@ impl frame_system::Config for Runtime { } construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { - System: frame_system::{Pallet, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Storage, Config, Event}, Pallet1: pallet::{Pallet}, Pallet2: pallet::{Pallet}, Pallet3: pallet::{Pallet}, diff --git a/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr b/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr index dbd81ef367a9f..55cef6704ee3f 100644 --- a/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr +++ b/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr @@ -1,7 +1,7 @@ error: The number of pallets exceeds the maximum number of tuple elements. To increase this limit, enable the tuples-96 feature of [frame_support]. - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:50:2 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:49:2 | -50 | pub struct Runtime where +49 | pub struct Runtime | ^^^ error[E0412]: cannot find type `RuntimeCall` in this scope @@ -34,28 +34,28 @@ error[E0412]: cannot find type `RuntimeOrigin` in this scope | ^^^^^^^^^^^^^ help: you might have meant to use the associated type: `Self::RuntimeOrigin` error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:27:21 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:26:21 | -27 | type RuntimeCall = RuntimeCall; +26 | type RuntimeCall = RuntimeCall; | ^^^^^^^^^^^ help: you might have meant to use the associated type: `Self::RuntimeCall` error[E0412]: cannot find type `RuntimeEvent` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:33:22 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:32:22 | -33 | 
type RuntimeEvent = RuntimeEvent; +32 | type RuntimeEvent = RuntimeEvent; | ^^^^^^^^^^^^ help: you might have meant to use the associated type: `Self::RuntimeEvent` error[E0412]: cannot find type `PalletInfo` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:39:20 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:38:20 | -39 | type PalletInfo = PalletInfo; +38 | type PalletInfo = PalletInfo; | ^^^^^^^^^^ | help: you might have meant to use the associated type | -39 | type PalletInfo = Self::PalletInfo; +38 | type PalletInfo = Self::PalletInfo; | ~~~~~~~~~~~~~~~~ help: consider importing this trait | -1 | use frame_support::traits::PalletInfo; +1 + use frame_support::traits::PalletInfo; | diff --git a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs index 866c3f0de6c3c..8b3e26bc5e2e4 100644 --- a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs +++ b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs @@ -1,6 +1,6 @@ use frame_support::construct_runtime; -use sp_runtime::{generic, traits::BlakeTwo256}; use sp_core::sr25519; +use sp_runtime::{generic, traits::BlakeTwo256}; #[frame_support::pallet] mod pallet { @@ -18,22 +18,22 @@ mod pallet { #[derive(scale_info::TypeInfo, frame_support::PalletError, codec::Encode, codec::Decode)] pub enum Nested1 { - Nested2(Nested2) + Nested2(Nested2), } #[derive(scale_info::TypeInfo, frame_support::PalletError, codec::Encode, codec::Decode)] pub enum Nested2 { - Nested3(Nested3) + Nested3(Nested3), } #[derive(scale_info::TypeInfo, frame_support::PalletError, codec::Encode, codec::Decode)] pub enum Nested3 { - Nested4(Nested4) + Nested4(Nested4), } #[derive(scale_info::TypeInfo, frame_support::PalletError, codec::Encode, codec::Decode)] pub enum Nested4 { - Num(u8) + Num(u8), } pub type Signature = sr25519::Signature; @@ -47,14 +47,13 
@@ impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); @@ -72,12 +71,9 @@ impl frame_system::Config for Runtime { } construct_runtime! { - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { - System: frame_system::{Pallet, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Storage, Config, Event}, Pallet: pallet::{Pallet}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr index b9cec02a2b092..47504573515a2 100644 --- a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr +++ b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr @@ -1,13 +1,13 @@ error[E0080]: evaluation of constant value failed - --> tests/construct_runtime_ui/pallet_error_too_large.rs:74:1 + --> tests/construct_runtime_ui/pallet_error_too_large.rs:73:1 | -74 | / construct_runtime! { -75 | | pub struct Runtime where -76 | | Block = Block, -77 | | NodeBlock = Block, -... | -82 | | } -83 | | } - | |_^ the evaluated program panicked at 'The maximum encoded size of the error type in the `Pallet` pallet exceeds `MAX_MODULE_ERROR_ENCODED_SIZE`', $DIR/tests/construct_runtime_ui/pallet_error_too_large.rs:74:1 +73 | / construct_runtime! 
{ +74 | | pub struct Runtime +75 | | { +76 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, +77 | | Pallet: pallet::{Pallet}, +78 | | } +79 | | } + | |_^ the evaluated program panicked at 'The maximum encoded size of the error type in the `Pallet` pallet exceeds `MAX_MODULE_ERROR_ENCODED_SIZE`', $DIR/tests/construct_runtime_ui/pallet_error_too_large.rs:73:1 | = note: this error originates in the macro `$crate::panic::panic_2021` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs index 0010f5277bb40..25cb5e93f652e 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs @@ -1,6 +1,6 @@ use frame_support::construct_runtime; -use sp_runtime::{generic, traits::BlakeTwo256}; use sp_core::sr25519; +use sp_runtime::{generic, traits::BlakeTwo256}; #[frame_support::pallet] mod pallet { @@ -22,14 +22,13 @@ impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); @@ -47,12 +46,9 @@ impl frame_system::Config for Runtime { } construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { - System: frame_system::{Pallet, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Storage, Config, Event}, Pallet: pallet::{Pallet, Call}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr index c2092edea05b5..f3f29e4c69554 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr @@ -4,13 +4,13 @@ error: `Pallet` does not have #[pallet::call] defined, perhaps you should remove 5 | #[frame_support::pallet] | ^^^^^^^^^^^^^^^^^^^^^^^^ ... -49 | / construct_runtime! { -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } +48 | / construct_runtime! { +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet::{Pallet, Call}, +53 | | } +54 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs index 35212df8f457c..c44cceef81a12 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs @@ -1,6 +1,6 @@ use frame_support::construct_runtime; -use sp_runtime::{generic, traits::BlakeTwo256}; use sp_core::sr25519; +use sp_runtime::{generic, traits::BlakeTwo256}; #[frame_support::pallet] mod pallet { @@ -22,14 +22,13 @@ impl 
pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); @@ -47,13 +46,10 @@ impl frame_system::Config for Runtime { } construct_runtime! { - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { - System: frame_system::{Pallet, Call, Storage, Config, Event}, - Pallet: pallet::{Pallet, Event}, + System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet expanded::{}::{Pallet, Event}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr index daf027a5080cb..81e42cec3b97a 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -4,33 +4,33 @@ error: `Pallet` does not have #[pallet::event] defined, perhaps you should remov 5 | #[frame_support::pallet] | ^^^^^^^^^^^^^^^^^^^^^^^^ ... -49 | / construct_runtime! { -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } +48 | / construct_runtime! 
{ +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet expanded::{}::{Pallet, Event}, +53 | | } +54 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_event_check::is_event_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `Event` in module `pallet` - --> tests/construct_runtime_ui/undefined_event_part.rs:49:1 + --> tests/construct_runtime_ui/undefined_event_part.rs:48:1 | -49 | / construct_runtime! { -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } +48 | / construct_runtime! { +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet expanded::{}::{Pallet, Event}, +53 | | } +54 | | } | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | -1 | use frame_support_test::Event; +1 + use frame_support_test::Event; | -1 | use frame_system::Event; +1 + use frame_system::Event; | diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs index ec753e9a03129..4436202f04fc7 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs @@ -1,6 +1,6 @@ use frame_support::construct_runtime; -use sp_runtime::{generic, traits::BlakeTwo256}; use sp_core::sr25519; +use sp_runtime::{generic, traits::BlakeTwo256}; #[frame_support::pallet] mod pallet { @@ -22,14 +22,13 @@ impl pallet::Config for Runtime {} impl 
frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); @@ -47,13 +46,10 @@ impl frame_system::Config for Runtime { } construct_runtime! { - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { - System: frame_system::{Pallet, Call, Storage, Config, Event}, - Pallet: pallet::{Pallet, Config}, + System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet expanded::{}::{Pallet, Config}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr index a5f6b3a1093ea..920785fc96291 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -4,33 +4,33 @@ error: `Pallet` does not have #[pallet::genesis_config] defined, perhaps you sho 5 | #[frame_support::pallet] | ^^^^^^^^^^^^^^^^^^^^^^^^ ... -49 | / construct_runtime! { -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } +48 | / construct_runtime! 
{ +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet expanded::{}::{Pallet, Config}, +53 | | } +54 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_genesis_config_check::is_genesis_config_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `GenesisConfig` in module `pallet` - --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:49:1 + --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:48:1 | -49 | / construct_runtime! { -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } +48 | / construct_runtime! { +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet expanded::{}::{Pallet, Config}, +53 | | } +54 | | } | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | -1 | use frame_system::GenesisConfig; +1 + use frame_system::GenesisConfig; | -1 | use test_pallet::GenesisConfig; +1 + use test_pallet::GenesisConfig; | diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs index 22eaccca42d97..8b48c4d0d6af7 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs @@ -1,6 +1,6 @@ use frame_support::construct_runtime; -use sp_runtime::{generic, traits::BlakeTwo256}; use sp_core::sr25519; +use sp_runtime::{generic, traits::BlakeTwo256}; #[frame_support::pallet] mod pallet { @@ -22,14 +22,13 @@ impl 
pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); @@ -47,13 +46,10 @@ impl frame_system::Config for Runtime { } construct_runtime! { - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { - System: frame_system::{Pallet, Call, Storage, Config, Event}, - Pallet: pallet::{Pallet, Inherent}, + System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet expanded::{}::{Pallet, Inherent}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index a068cab4cb1ab..659d43b151006 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -4,31 +4,31 @@ error: `Pallet` does not have #[pallet::inherent] defined, perhaps you should re 5 | #[frame_support::pallet] | ^^^^^^^^^^^^^^^^^^^^^^^^ ... -49 | / construct_runtime! { -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } +48 | / construct_runtime! 
{ +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +53 | | } +54 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_inherent_check::is_inherent_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `create_inherent` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:48:1 | 11 | pub struct Pallet(_); | -------------------- function or associated item `create_inherent` not found for this struct ... -49 | construct_runtime! { +48 | construct_runtime! { | _^ -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +53 | | } +54 | | } | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope @@ -37,19 +37,19 @@ error[E0599]: no function or associated item named `create_inherent` found for s = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `is_inherent` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:48:1 | 11 | pub struct Pallet(_); | -------------------- function or associated item `is_inherent` not found for this struct ... -49 | construct_runtime! 
{ +48 | construct_runtime! { | _^ -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +53 | | } +54 | | } | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope @@ -58,19 +58,19 @@ error[E0599]: no function or associated item named `is_inherent` found for struc = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `check_inherent` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:48:1 | 11 | pub struct Pallet(_); | -------------------- function or associated item `check_inherent` not found for this struct ... -49 | construct_runtime! { +48 | construct_runtime! { | _^ -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... 
| -57 | | } -58 | | } +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +53 | | } +54 | | } | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope @@ -79,19 +79,19 @@ error[E0599]: no function or associated item named `check_inherent` found for st = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no associated item named `INHERENT_IDENTIFIER` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:48:1 | 11 | pub struct Pallet(_); | -------------------- associated item `INHERENT_IDENTIFIER` not found for this struct ... -49 | construct_runtime! { +48 | construct_runtime! { | _^ -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... 
| -57 | | } -58 | | } +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +53 | | } +54 | | } | |_^ associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope @@ -100,19 +100,19 @@ error[E0599]: no associated item named `INHERENT_IDENTIFIER` found for struct `p = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `is_inherent_required` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:48:1 | 11 | pub struct Pallet(_); | -------------------- function or associated item `is_inherent_required` not found for this struct ... -49 | construct_runtime! { +48 | construct_runtime! { | _^ -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... 
| -57 | | } -58 | | } +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +53 | | } +54 | | } | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs index 1705fff49dda8..974928785f748 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs @@ -1,6 +1,6 @@ use frame_support::construct_runtime; -use sp_runtime::{generic, traits::BlakeTwo256}; use sp_core::sr25519; +use sp_runtime::{generic, traits::BlakeTwo256}; #[frame_support::pallet] mod pallet { @@ -22,14 +22,13 @@ impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); @@ -47,13 +46,10 @@ impl frame_system::Config for Runtime { } construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { - System: frame_system::{Pallet, Call, Storage, Config, Event}, - Pallet: pallet::{Pallet, Origin}, + System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet expanded::{}::{Pallet, Origin}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index 3e4326d3f7372..c41dbe79421ea 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -4,33 +4,33 @@ error: `Pallet` does not have #[pallet::origin] defined, perhaps you should remo 5 | #[frame_support::pallet] | ^^^^^^^^^^^^^^^^^^^^^^^^ ... -49 | / construct_runtime! { -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } +48 | / construct_runtime! { +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet expanded::{}::{Pallet, Origin}, +53 | | } +54 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_origin_check::is_origin_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `Origin` in module `pallet` - --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 + --> tests/construct_runtime_ui/undefined_origin_part.rs:48:1 | -49 | / construct_runtime! { -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } +48 | / construct_runtime! 
{ +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet expanded::{}::{Pallet, Origin}, +53 | | } +54 | | } | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | -1 | use frame_support_test::Origin; +1 + use frame_support_test::Origin; | -1 | use frame_system::Origin; +1 + use frame_system::Origin; | diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs index 8f64d30940725..505b249d92d58 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs @@ -1,6 +1,6 @@ use frame_support::construct_runtime; -use sp_runtime::{generic, traits::BlakeTwo256}; use sp_core::sr25519; +use sp_runtime::{generic, traits::BlakeTwo256}; #[frame_support::pallet] mod pallet { @@ -22,14 +22,13 @@ impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); @@ -47,12 +46,9 @@ impl frame_system::Config for Runtime { } construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { - System: frame_system::{Pallet, Call, Storage, Config, Event}, + System: frame_system::{Pallet, Call, Storage, Config, Event}, Pallet: pallet::{Pallet, ValidateUnsigned}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr index 84f1e54d5c24e..007b77250736e 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr @@ -4,74 +4,76 @@ error: `Pallet` does not have #[pallet::validate_unsigned] defined, perhaps you 5 | #[frame_support::pallet] | ^^^^^^^^^^^^^^^^^^^^^^^^ ... -49 | / construct_runtime! { -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } +48 | / construct_runtime! { +49 | | pub struct Runtime +50 | | { +51 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, +52 | | Pallet: pallet::{Pallet, ValidateUnsigned}, +53 | | } +54 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no variant or associated item named `Pallet` found for enum `RuntimeCall` in the current scope - --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:56:3 + --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:52:3 | -49 | // construct_runtime! { -50 | || pub struct Runtime where -51 | || Block = Block, -52 | || NodeBlock = Block, -... 
|| -55 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, -56 | || Pallet: pallet::{Pallet, ValidateUnsigned}, +48 | // construct_runtime! { +49 | || pub struct Runtime +50 | || { +51 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, +52 | || Pallet: pallet::{Pallet, ValidateUnsigned}, | || -^^^^^^ variant or associated item not found in `RuntimeCall` | ||________| | | -57 | | } -58 | | } - | |__- variant or associated item `Pallet` not found for this enum +... | error[E0599]: no function or associated item named `pre_dispatch` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:49:1 + --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:48:1 | -11 | pub struct Pallet(_); - | -------------------- function or associated item `pre_dispatch` not found for this struct +11 | pub struct Pallet(_); + | -------------------- function or associated item `pre_dispatch` not found for this struct ... -49 | construct_runtime! { - | _^ -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, +48 | construct_runtime! { + | __^ + | | _| + | || +49 | || pub struct Runtime +50 | || { +51 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, +52 | || Pallet: pallet::{Pallet, ValidateUnsigned}, +53 | || } +54 | || } + | ||_- in this macro invocation ... 
| -57 | | } -58 | | } - | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope = note: the following traits define an item `pre_dispatch`, perhaps you need to implement one of them: candidate #1: `SignedExtension` candidate #2: `ValidateUnsigned` - = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `validate_unsigned` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:49:1 + --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:48:1 | -11 | pub struct Pallet(_); - | -------------------- function or associated item `validate_unsigned` not found for this struct +11 | pub struct Pallet(_); + | -------------------- function or associated item `validate_unsigned` not found for this struct ... -49 | construct_runtime! { - | _^ -50 | | pub struct Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, +48 | construct_runtime! { + | __^ + | | _| + | || +49 | || pub struct Runtime +50 | || { +51 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, +52 | || Pallet: pallet::{Pallet, ValidateUnsigned}, +53 | || } +54 | || } + | ||_- in this macro invocation ... 
| -57 | | } -58 | | } - | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope = note: the following traits define an item `validate_unsigned`, perhaps you need to implement one of them: candidate #1: `SignedExtension` candidate #2: `ValidateUnsigned` - = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs index e5fd284dc8722..e4e2d3dca021e 100644 --- a/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs +++ b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs @@ -1,10 +1,7 @@ use frame_support::construct_runtime; construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { System: system::{Pallet}, #[cfg(feature(test))] diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr index 98d99a0d34997..34637269db617 100644 --- a/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr +++ b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr @@ -1,6 +1,6 @@ error: feature(test) ^ expected one of `=`, `,`, `)` here - --> tests/construct_runtime_ui/unsupported_meta_structure.rs:10:3 - | -10 | #[cfg(feature(test))] - | ^ + --> tests/construct_runtime_ui/unsupported_meta_structure.rs:7:3 + | +7 | #[cfg(feature(test))] + | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs index 03363d30a6429..491cc2c90533d 100644 --- a/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs +++ b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs @@ -1,10 +1,7 @@ use frame_support::construct_runtime; construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { System: system::{Pallet}, #[attr] diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr index fceb2b8a99db8..da1b61b1c3078 100644 --- a/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr +++ b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr @@ -1,5 +1,5 @@ error: Unsupported attribute, only #[cfg] is supported on pallet declarations in `construct_runtime` - --> tests/construct_runtime_ui/unsupported_pallet_attr.rs:10:3 - | -10 | #[attr] - | ^ + --> tests/construct_runtime_ui/unsupported_pallet_attr.rs:7:3 + | +7 | #[attr] + | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs index 971e2b831ae08..8563be1008cd9 100644 --- a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs @@ -25,12 +25,9 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic}, + System: system::{Pallet, Call, Storage, Config, Event}, Pallet: pallet use_parts { Call }, } } diff --git a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr index cb6b6a44d61da..4058ccab2c5d7 100644 --- a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr @@ -1,7 +1,7 @@ error: Invalid pallet part specified, the pallet `Pallet` doesn't have the `Call` part. Available parts are: `Pallet`, `Storage`. 
- --> tests/construct_runtime_ui/use_undefined_part.rs:34:30 + --> tests/construct_runtime_ui/use_undefined_part.rs:31:30 | -34 | Pallet: pallet use_parts { Call }, +31 | Pallet: pallet use_parts { Call }, | ^^^^ error[E0412]: cannot find type `RuntimeCall` in this scope diff --git a/frame/support/test/tests/derive_impl_ui.rs b/frame/support/test/tests/derive_impl_ui.rs new file mode 100644 index 0000000000000..ee219d0670aaf --- /dev/null +++ b/frame/support/test/tests/derive_impl_ui.rs @@ -0,0 +1,38 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(not(feature = "disable-ui-tests"))] +#![cfg(test)] + +#[rustversion::attr(not(stable), ignore)] +#[test] +fn derive_impl_ui() { + // Only run the ui tests when `RUN_UI_TESTS` is set. + if std::env::var("RUN_UI_TESTS").is_err() { + return + } + + // As trybuild is using `cargo check`, we don't need the real WASM binaries. + std::env::set_var("SKIP_WASM_BUILD", "1"); + + // Deny all warnings since we emit warnings as part of a Pallet's UI. 
+ std::env::set_var("RUSTFLAGS", "--deny warnings"); + + let t = trybuild::TestCases::new(); + t.compile_fail("tests/derive_impl_ui/*.rs"); + t.pass("tests/derive_impl_ui/pass/*.rs"); +} diff --git a/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs b/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs new file mode 100644 index 0000000000000..3b27916933865 --- /dev/null +++ b/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs @@ -0,0 +1,41 @@ +use frame_support::*; + +pub trait Animal { + type Locomotion; + type Diet; + type SleepingStrategy; + type Environment; + + fn animal_name() -> &'static str; +} + +pub type RunsOnFourLegs = (usize, usize, usize, usize); +pub type RunsOnTwoLegs = (usize, usize); +pub type Swims = isize; +pub type Diurnal = bool; +pub type Nocturnal = Option; +pub type Omnivore = char; +pub type Land = ((), ()); +pub type Sea = ((), (), ()); +pub type Carnivore = (char, char); + +pub struct FourLeggedAnimal {} + +#[register_default_impl(FourLeggedAnimal)] +impl Animal for FourLeggedAnimal { + type Locomotion = RunsOnFourLegs; + type Diet = Omnivore; + type SleepingStrategy = Diurnal; + type Environment = Land; + + fn animal_name() -> &'static str { + "A Four-Legged Animal" + } +} + +pub struct AcquaticMammal {} + +#[derive_impl(FourLeggedAnimal as Animal)] +struct Something {} + +fn main() {} diff --git a/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.stderr b/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.stderr new file mode 100644 index 0000000000000..735fd7a628e77 --- /dev/null +++ b/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.stderr @@ -0,0 +1,5 @@ +error: expected `impl` + --> tests/derive_impl_ui/attached_to_non_impl.rs:39:1 + | +39 | struct Something {} + | ^^^^^^ diff --git a/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs b/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs new file mode 100644 index 
0000000000000..2badd1830033b --- /dev/null +++ b/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs @@ -0,0 +1,48 @@ +use frame_support::*; + +pub trait Animal { + type Locomotion; + type Diet; + type SleepingStrategy; + type Environment; + + fn animal_name() -> &'static str; +} + +pub type RunsOnFourLegs = (usize, usize, usize, usize); +pub type RunsOnTwoLegs = (usize, usize); +pub type Swims = isize; +pub type Diurnal = bool; +pub type Nocturnal = Option; +pub type Omnivore = char; +pub type Land = ((), ()); +pub type Sea = ((), (), ()); +pub type Carnivore = (char, char); + +pub struct FourLeggedAnimal {} + +#[register_default_impl(FourLeggedAnimal)] +impl Animal for FourLeggedAnimal { + type Locomotion = RunsOnFourLegs; + type Diet = Omnivore; + type SleepingStrategy = Diurnal; + type Environment = Land; + + fn animal_name() -> &'static str { + "A Four-Legged Animal" + } +} + +pub struct AcquaticMammal {} + +// Should throw: `error: cannot find macro `__export_tokens_tt_tiger` in this scope` +// +// Note that there is really no better way to clean up this error, tt_call suffers from the +// same downside but this is really the only rough edge when using macro magic. 
+#[derive_impl(Tiger as Animal)] +impl Animal for AcquaticMammal { + type Locomotion = (Swims, RunsOnFourLegs); + type Environment = (Land, Sea); +} + +fn main() {} diff --git a/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.stderr b/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.stderr new file mode 100644 index 0000000000000..1cac166246276 --- /dev/null +++ b/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.stderr @@ -0,0 +1,7 @@ +error: cannot find macro `__export_tokens_tt_tiger` in this scope + --> tests/derive_impl_ui/bad_default_impl_path.rs:42:1 + | +42 | #[derive_impl(Tiger as Animal)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: this error originates in the macro `frame_support::macro_magic::forward_tokens` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs b/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs new file mode 100644 index 0000000000000..adc5df23a759a --- /dev/null +++ b/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs @@ -0,0 +1,44 @@ +use frame_support::*; + +pub trait Animal { + type Locomotion; + type Diet; + type SleepingStrategy; + type Environment; + + fn animal_name() -> &'static str; +} + +pub type RunsOnFourLegs = (usize, usize, usize, usize); +pub type RunsOnTwoLegs = (usize, usize); +pub type Swims = isize; +pub type Diurnal = bool; +pub type Nocturnal = Option; +pub type Omnivore = char; +pub type Land = ((), ()); +pub type Sea = ((), (), ()); +pub type Carnivore = (char, char); + +pub struct FourLeggedAnimal {} + +#[register_default_impl(FourLeggedAnimal)] +impl Animal for FourLeggedAnimal { + type Locomotion = RunsOnFourLegs; + type Diet = Omnivore; + type SleepingStrategy = Diurnal; + type Environment = Land; + + fn animal_name() -> &'static str { + "A Four-Legged Animal" + } +} + +pub struct AcquaticMammal {} + +#[derive_impl(FourLeggedAnimal as Insect)] 
+impl Animal for AcquaticMammal { + type Locomotion = (Swims, RunsOnFourLegs); + type Environment = (Land, Sea); +} + +fn main() {} diff --git a/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.stderr b/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.stderr new file mode 100644 index 0000000000000..6fd4e431beb52 --- /dev/null +++ b/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.stderr @@ -0,0 +1,5 @@ +error[E0405]: cannot find trait `Insect` in this scope + --> tests/derive_impl_ui/bad_disambiguation_path.rs:38:35 + | +38 | #[derive_impl(FourLeggedAnimal as Insect)] + | ^^^^^^ not found in this scope diff --git a/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs b/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs new file mode 100644 index 0000000000000..21f1cc32009a5 --- /dev/null +++ b/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs @@ -0,0 +1,44 @@ +use frame_support::*; + +pub trait Animal { + type Locomotion; + type Diet; + type SleepingStrategy; + type Environment; + + fn animal_name() -> &'static str; +} + +pub type RunsOnFourLegs = (usize, usize, usize, usize); +pub type RunsOnTwoLegs = (usize, usize); +pub type Swims = isize; +pub type Diurnal = bool; +pub type Nocturnal = Option; +pub type Omnivore = char; +pub type Land = ((), ()); +pub type Sea = ((), (), ()); +pub type Carnivore = (char, char); + +pub struct FourLeggedAnimal {} + +#[register_default_impl(FourLeggedAnimal)] +impl Animal for FourLeggedAnimal { + type Locomotion = RunsOnFourLegs; + type Diet = Omnivore; + type SleepingStrategy = Diurnal; + type Environment = Land; + + fn animal_name() -> &'static str { + "A Four-Legged Animal" + } +} + +pub struct AcquaticMammal {} + +#[derive_impl(FourLeggedAnimal as)] +impl Animal for AcquaticMammal { + type Locomotion = (Swims, RunsOnFourLegs); + type Environment = (Land, Sea); +} + +fn main() {} diff --git 
a/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.stderr b/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.stderr new file mode 100644 index 0000000000000..85cd94ae08ae7 --- /dev/null +++ b/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.stderr @@ -0,0 +1,7 @@ +error: unexpected end of input, expected identifier + --> tests/derive_impl_ui/missing_disambiguation_path.rs:38:1 + | +38 | #[derive_impl(FourLeggedAnimal as)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: this error originates in the attribute macro `derive_impl` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs b/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs new file mode 100644 index 0000000000000..336ddc315f8cb --- /dev/null +++ b/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs @@ -0,0 +1,69 @@ +use frame_support::*; +use static_assertions::assert_type_eq_all; + +pub trait Animal { + type Locomotion; + type Diet; + type SleepingStrategy; + type Environment; + + fn animal_name() -> &'static str; +} + +pub type RunsOnFourLegs = (usize, usize, usize, usize); +pub type RunsOnTwoLegs = (usize, usize); +pub type Swims = isize; +pub type Diurnal = bool; +pub type Nocturnal = Option; +pub type Omnivore = char; +pub type Land = ((), ()); +pub type Sea = ((), (), ()); +pub type Carnivore = (char, char); + +pub struct FourLeggedAnimal {} + +#[register_default_impl(FourLeggedAnimal)] +impl Animal for FourLeggedAnimal { + type Locomotion = RunsOnFourLegs; + type Diet = Omnivore; + type SleepingStrategy = Diurnal; + type Environment = Land; + + fn animal_name() -> &'static str { + "A Four-Legged Animal" + } +} + +pub struct AcquaticMammal {} + +// without omitting the `as X` +#[derive_impl(FourLeggedAnimal as Animal)] +impl Animal for AcquaticMammal { + type Locomotion = (Swims, RunsOnFourLegs); + type Environment = (Land, 
Sea); +} + +assert_type_eq_all!(::Locomotion, (Swims, RunsOnFourLegs)); +assert_type_eq_all!(::Environment, (Land, Sea)); +assert_type_eq_all!(::Diet, Omnivore); +assert_type_eq_all!(::SleepingStrategy, Diurnal); + +pub struct Lion {} + +// test omitting the `as X` +#[derive_impl(FourLeggedAnimal)] +impl Animal for Lion { + type Diet = Carnivore; + type SleepingStrategy = Nocturnal; + + fn animal_name() -> &'static str { + "Lion" + } +} + +assert_type_eq_all!(::Diet, Carnivore); +assert_type_eq_all!(::SleepingStrategy, Nocturnal); +assert_type_eq_all!(::Environment, Land); +assert_type_eq_all!(::Locomotion, RunsOnFourLegs); + +fn main() {} diff --git a/frame/support/test/tests/derive_impl_ui/pass/macro_magic_working.rs b/frame/support/test/tests/derive_impl_ui/pass/macro_magic_working.rs new file mode 100644 index 0000000000000..ec09bd15e0173 --- /dev/null +++ b/frame/support/test/tests/derive_impl_ui/pass/macro_magic_working.rs @@ -0,0 +1,18 @@ +#[frame_support::macro_magic::export_tokens] +struct MyCoolStruct { + field: u32, +} + +// create a test receiver since `proc_support` isn't enabled so we're on our own in terms of +// what we can call +macro_rules! receiver { + ($_tokens_var:ident, $($tokens:tt)*) => { + stringify!($($tokens)*) + }; +} + +fn main() { + let _instance: MyCoolStruct = MyCoolStruct { field: 3 }; + let _str = __export_tokens_tt_my_cool_struct!(tokens, receiver); + // this compiling demonstrates that macro_magic is working properly +} diff --git a/frame/support/test/tests/final_keys.rs b/frame/support/test/tests/final_keys.rs index 610f6532c52d0..765afaf1e6604 100644 --- a/frame/support/test/tests/final_keys.rs +++ b/frame/support/test/tests/final_keys.rs @@ -16,8 +16,10 @@ // limitations under the License. 
use codec::Encode; -use frame_support::{storage::unhashed, StoragePrefixedMap}; -use sp_core::sr25519; +use frame_support::{derive_impl, storage::unhashed, StoragePrefixedMap}; +use frame_system::pallet_prelude::BlockNumberFor; + +use sp_core::{sr25519, ConstU32}; use sp_io::{ hashing::{blake2_128, twox_128, twox_64}, TestExternalities, @@ -31,10 +33,9 @@ use sp_runtime::{ mod no_instance { use super::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::config] pub trait Config: frame_system::Config {} @@ -59,7 +60,7 @@ mod no_instance { #[pallet::storage] #[pallet::getter(fn test_generic_value)] - pub type TestGenericValue = StorageValue<_, T::BlockNumber, OptionQuery>; + pub type TestGenericValue = StorageValue<_, BlockNumberFor, OptionQuery>; #[pallet::storage] #[pallet::getter(fn foo2)] pub type TestGenericDoubleMap = StorageDoubleMap< @@ -67,7 +68,7 @@ mod no_instance { Blake2_128Concat, u32, Blake2_128Concat, - T::BlockNumber, + BlockNumberFor, u32, ValueQuery, >; @@ -75,8 +76,8 @@ mod no_instance { #[pallet::genesis_config] pub struct GenesisConfig { pub value: u32, - pub test_generic_value: T::BlockNumber, - pub test_generic_double_map: Vec<(u32, T::BlockNumber, u32)>, + pub test_generic_value: BlockNumberFor, + pub test_generic_double_map: Vec<(u32, BlockNumberFor, u32)>, } impl Default for GenesisConfig { @@ -90,7 +91,7 @@ mod no_instance { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { >::put(self.value); >::put(&self.test_generic_value); @@ -105,7 +106,6 @@ mod no_instance { mod instance { use super::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); @@ -136,7 +136,7 @@ mod instance { #[pallet::storage] #[pallet::getter(fn test_generic_value)] pub type 
TestGenericValue, I: 'static = ()> = - StorageValue<_, T::BlockNumber, OptionQuery>; + StorageValue<_, BlockNumberFor, OptionQuery>; #[pallet::storage] #[pallet::getter(fn foo2)] pub type TestGenericDoubleMap, I: 'static = ()> = StorageDoubleMap< @@ -144,7 +144,7 @@ mod instance { Blake2_128Concat, u32, Blake2_128Concat, - T::BlockNumber, + BlockNumberFor, u32, ValueQuery, >; @@ -152,8 +152,8 @@ mod instance { #[pallet::genesis_config] pub struct GenesisConfig, I: 'static = ()> { pub value: u32, - pub test_generic_value: T::BlockNumber, - pub test_generic_double_map: Vec<(u32, T::BlockNumber, u32)>, + pub test_generic_value: BlockNumberFor, + pub test_generic_double_map: Vec<(u32, BlockNumberFor, u32)>, pub phantom: PhantomData, } @@ -169,7 +169,7 @@ mod instance { } #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { + impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) { >::put(self.value); >::put(&self.test_generic_value); @@ -201,27 +201,25 @@ pub type Block = generic::Block; frame_support::construct_runtime!( pub enum Runtime - where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + { - System: frame_support_test, + System: frame_system, FinalKeysNone: no_instance, FinalKeysSome: instance, Instance2FinalKeysSome: instance::, } ); -impl frame_support_test::Config for Runtime { - type BlockNumber = BlockNumber; - type AccountId = AccountId; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; + type Block = Block; + type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; type PalletInfo = PalletInfo; - type DbWeight = (); + type OnSetCode = (); } impl no_instance::Config for Runtime {} diff --git a/frame/support/test/tests/genesisconfig.rs 
b/frame/support/test/tests/genesisconfig.rs index 48904baf1ad62..c6781220692a9 100644 --- a/frame/support/test/tests/genesisconfig.rs +++ b/frame/support/test/tests/genesisconfig.rs @@ -15,7 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sp_core::sr25519; +use frame_support::derive_impl; +use frame_system::pallet_prelude::BlockNumberFor; +use sp_core::{sr25519, ConstU32}; use sp_runtime::{ generic, traits::{BlakeTwo256, Verify}, @@ -25,10 +27,9 @@ use sp_runtime::{ pub mod pallet { use super::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::config] pub trait Config: frame_system::Config {} @@ -39,11 +40,11 @@ pub mod pallet { #[pallet::storage] #[pallet::unbounded] pub type AppendableDM = - StorageDoubleMap<_, Identity, u32, Identity, T::BlockNumber, Vec>; + StorageDoubleMap<_, Identity, u32, Identity, BlockNumberFor, Vec>; #[pallet::genesis_config] pub struct GenesisConfig { - pub t: Vec<(u32, T::BlockNumber, Vec)>, + pub t: Vec<(u32, BlockNumberFor, Vec)>, } impl Default for GenesisConfig { @@ -53,7 +54,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { for (k1, k2, v) in &self.t { >::insert(k1, k2, v); @@ -71,25 +72,23 @@ pub type Block = generic::Block; frame_support::construct_runtime!( pub enum Test - where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + { - System: frame_support_test, + System: frame_system, MyPallet: pallet, } ); -impl frame_support_test::Config for Test { - type BlockNumber = BlockNumber; - type AccountId = AccountId; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; + type 
Block = Block; + type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; type PalletInfo = PalletInfo; - type DbWeight = (); + type OnSetCode = (); } impl pallet::Config for Test {} diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 5d17a40f8c408..2fb6d7658d6ab 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -18,6 +18,7 @@ #![recursion_limit = "128"] use frame_support::{ + derive_impl, inherent::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}, metadata_ir::{ PalletStorageMetadataIR, StorageEntryMetadataIR, StorageEntryModifierIR, @@ -25,6 +26,7 @@ use frame_support::{ }, traits::ConstU32, }; +use frame_system::pallet_prelude::BlockNumberFor; use sp_core::sr25519; use sp_runtime::{ generic, @@ -39,10 +41,9 @@ pub trait Currency {} // * Origin, Inherent, Event #[frame_support::pallet(dev_mode)] mod module1 { - use self::frame_system::pallet_prelude::*; use super::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; + use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(_); @@ -77,7 +78,7 @@ mod module1 { #[pallet::genesis_config] pub struct GenesisConfig, I: 'static = ()> { pub value: >::GenericType, - pub test: ::BlockNumber, + pub test: BlockNumberFor, } impl, I: 'static> Default for GenesisConfig { @@ -87,9 +88,9 @@ mod module1 { } #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig + impl, I: 'static> BuildGenesisConfig for GenesisConfig where - T::BlockNumber: std::fmt::Display, + BlockNumberFor: std::fmt::Display, { fn build(&self) { >::put(self.value.clone()); @@ -123,7 +124,7 @@ mod module1 { #[pallet::inherent] impl, I: 'static> ProvideInherent for Pallet where - T::BlockNumber: From, + BlockNumberFor: From, { type Call = Call; type Error = MakeFatalError<()>; @@ -150,7 +151,6 @@ mod module1 { 
mod module2 { use super::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); @@ -196,9 +196,9 @@ mod module2 { } #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig + impl, I: 'static> BuildGenesisConfig for GenesisConfig where - T::BlockNumber: std::fmt::Display, + BlockNumberFor: std::fmt::Display, { fn build(&self) { >::put(self.value.clone()); @@ -252,7 +252,6 @@ mod module2 { mod module3 { use super::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); @@ -277,12 +276,9 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; frame_support::construct_runtime!( - pub enum Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub enum Runtime { - System: frame_support_test::{Pallet, Call, Event}, + System: frame_system::{Pallet, Call, Event}, Module1_1: module1::::{ Pallet, Call, Storage, Event, Config, Origin, Inherent }, @@ -303,15 +299,16 @@ frame_support::construct_runtime!( } ); -impl frame_support_test::Config for Runtime { - type BlockNumber = BlockNumber; - type AccountId = AccountId; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; + type Block = Block; + type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; type PalletInfo = PalletInfo; - type DbWeight = (); + type OnSetCode = (); } impl module1::Config for Runtime { @@ -352,7 +349,7 @@ impl module3::Config for Runtime { } fn new_test_ext() -> sp_io::TestExternalities { - GenesisConfig { + RuntimeGenesisConfig { module_1_1: module1::GenesisConfig { value: 3, test: 2 }, module_1_2: module1::GenesisConfig { value: 4, test: 5 }, 
module_2: module2::GenesisConfig { diff --git a/frame/support/test/tests/issue2219.rs b/frame/support/test/tests/issue2219.rs index ff3e0bd951873..4016707b51a8d 100644 --- a/frame/support/test/tests/issue2219.rs +++ b/frame/support/test/tests/issue2219.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use frame_support::derive_impl; +use frame_system::pallet_prelude::BlockNumberFor; use sp_core::{sr25519, ConstU64}; use sp_runtime::{ generic, @@ -25,10 +27,8 @@ use sp_runtime::{ mod module { use super::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; - pub type Request = - (::AccountId, Role, ::BlockNumber); + pub type Request = (::AccountId, Role, BlockNumberFor); pub type Requests = Vec>; #[derive(Copy, Clone, Eq, PartialEq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] @@ -46,41 +46,41 @@ mod module { pub max_actors: u32, // payouts are made at this block interval - pub reward_period: T::BlockNumber, + pub reward_period: BlockNumberFor, // minimum amount of time before being able to unstake - pub bonding_period: T::BlockNumber, + pub bonding_period: BlockNumberFor, // how long tokens remain locked for after unstaking - pub unbonding_period: T::BlockNumber, + pub unbonding_period: BlockNumberFor, // minimum period required to be in service. unbonding before this time is highly penalized - pub min_service_period: T::BlockNumber, + pub min_service_period: BlockNumberFor, // "startup" time allowed for roles that need to sync their infrastructure // with other providers before they are considered in service and punishable for // not delivering required level of service. 
- pub startup_grace_period: T::BlockNumber, + pub startup_grace_period: BlockNumberFor, } impl Default for RoleParameters { fn default() -> Self { Self { max_actors: 10, - reward_period: T::BlockNumber::default(), - unbonding_period: T::BlockNumber::default(), + reward_period: BlockNumberFor::::default(), + unbonding_period: BlockNumberFor::::default(), // not currently used min_actors: 5, - bonding_period: T::BlockNumber::default(), - min_service_period: T::BlockNumber::default(), - startup_grace_period: T::BlockNumber::default(), + bonding_period: BlockNumberFor::::default(), + min_service_period: BlockNumberFor::::default(), + startup_grace_period: BlockNumberFor::::default(), } } } #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::config] pub trait Config: frame_system::Config + TypeInfo {} @@ -115,7 +115,7 @@ mod module { /// tokens locked until given block number #[pallet::storage] #[pallet::getter(fn bondage)] - pub type Bondage = StorageMap<_, Blake2_128Concat, T::AccountId, T::BlockNumber>; + pub type Bondage = StorageMap<_, Blake2_128Concat, T::AccountId, BlockNumberFor>; /// First step before enter a role is registering intent with a new account/key. /// This is done by sending a role_entry_request() from the new account. 
@@ -134,14 +134,16 @@ mod module { pub type RequestLifeTime = StorageValue<_, u64, ValueQuery, ConstU64<0>>; #[pallet::genesis_config] - #[derive(Default)] - pub struct GenesisConfig { + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { pub enable_storage_role: bool, pub request_life_time: u64, + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { if self.enable_storage_role { >::insert(Role::Storage, >::default()); @@ -159,35 +161,36 @@ pub type Header = generic::Header; pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; pub type Block = generic::Block; -impl frame_support_test::Config for Runtime { - type BlockNumber = BlockNumber; - type AccountId = AccountId; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; + type Block = Block; + type BlockHashCount = ConstU64<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; type PalletInfo = PalletInfo; - type DbWeight = (); + type OnSetCode = (); } impl module::Config for Runtime {} frame_support::construct_runtime!( - pub struct Runtime - where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_support_test, + pub struct Runtime { + System: frame_system, Module: module, } ); #[test] fn create_genesis_config() { - let config = GenesisConfig { - module: module::GenesisConfig { request_life_time: 0, enable_storage_role: true }, + let config = RuntimeGenesisConfig { + system: Default::default(), + module: module::GenesisConfig { + request_life_time: 0, + enable_storage_role: true, + ..Default::default() + }, }; assert_eq!(config.module.request_life_time, 0); assert!(config.module.enable_storage_role); diff --git 
a/frame/support/test/tests/origin.rs b/frame/support/test/tests/origin.rs index 47451157b352c..5682bb500c7e3 100644 --- a/frame/support/test/tests/origin.rs +++ b/frame/support/test/tests/origin.rs @@ -19,15 +19,18 @@ #![recursion_limit = "128"] -use frame_support::traits::{Contains, OriginTrait}; +use frame_support::{ + derive_impl, + traits::{Contains, OriginTrait}, +}; +use sp_core::ConstU32; use sp_runtime::{generic, traits::BlakeTwo256}; mod nested { #[frame_support::pallet(dev_mode)] pub mod module { - use self::frame_system::pallet_prelude::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; + use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(_); @@ -60,11 +63,14 @@ mod nested { } #[pallet::genesis_config] - #[derive(Default)] - pub struct GenesisConfig {} + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, + } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) {} } } @@ -72,9 +78,8 @@ mod nested { #[frame_support::pallet(dev_mode)] pub mod module { - use self::frame_system::pallet_prelude::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; + use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(_); @@ -128,11 +133,14 @@ pub mod module { } #[pallet::genesis_config] - #[derive(Default)] - pub struct GenesisConfig {} + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, + } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) {} } } @@ -154,26 +162,24 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; frame_support::construct_runtime!( - pub enum RuntimeOriginTest where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = 
UncheckedExtrinsic + pub enum RuntimeOriginTest { - System: frame_support_test, + System: frame_system, NestedModule: nested::module, Module: module, } ); -impl frame_support_test::Config for RuntimeOriginTest { - type BlockNumber = BlockNumber; - type AccountId = AccountId; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for RuntimeOriginTest { type BaseCallFilter = BaseCallFilter; + type Block = Block; + type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; type PalletInfo = PalletInfo; - type DbWeight = (); + type OnSetCode = (); } impl nested::module::Config for RuntimeOriginTest { diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 7f15ad1f9298a..cb78bded1a358 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -36,7 +36,10 @@ use sp_io::{ hashing::{blake2_128, twox_128, twox_64}, TestExternalities, }; -use sp_runtime::{DispatchError, ModuleError}; +use sp_runtime::{ + traits::{Extrinsic as ExtrinsicT, SignaturePayload as SignaturePayloadT}, + DispatchError, ModuleError, +}; parameter_types! { /// Used to control if the storage version should be updated. @@ -44,7 +47,7 @@ parameter_types! { } /// Latest stable metadata version used for testing. 
-const LATEST_METADATA_VERSION: u32 = 14; +const LATEST_METADATA_VERSION: u32 = 15; pub struct SomeType1; impl From for u64 { @@ -365,6 +368,27 @@ pub mod pallet { ResultQuery::NonExistentStorageValue>, >; + #[pallet::storage] + #[pallet::getter(fn counted_nmap)] + pub type CountedNMap = CountedStorageNMap<_, storage::Key, u32>; + + #[pallet::storage] + #[pallet::getter(fn counted_nmap2)] + pub type CountedNMap2 = CountedStorageNMap< + Key = (NMapKey, NMapKey), + Value = u64, + MaxValues = ConstU32<11>, + >; + + #[pallet::storage] + #[pallet::getter(fn counted_nmap3)] + pub type CountedNMap3 = CountedStorageNMap< + _, + (NMapKey, NMapKey), + u128, + ResultQuery::NonExistentStorageValue>, + >; + #[pallet::storage] #[pallet::getter(fn conditional_value)] #[cfg(feature = "frame-feature-testing")] @@ -388,6 +412,15 @@ pub mod pallet { pub type ConditionalNMap = StorageNMap<_, (storage::Key, storage::Key), u32>; + #[cfg(feature = "frame-feature-testing")] + #[pallet::storage] + #[pallet::getter(fn conditional_counted_nmap)] + pub type ConditionalCountedNMap = CountedStorageNMap< + _, + (storage::Key, storage::Key), + u32, + >; + #[pallet::storage] #[pallet::storage_prefix = "RenamedCountedMap"] #[pallet::getter(fn counted_storage_map)] @@ -399,13 +432,18 @@ pub mod pallet { pub type Unbounded = StorageValue>; #[pallet::genesis_config] - #[derive(Default)] - pub struct GenesisConfig { + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig + where + T::AccountId: From + SomeAssociation1 + From, + { + #[serde(skip)] + _config: sp_std::marker::PhantomData, _myfield: u32, } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig + impl BuildGenesisConfig for GenesisConfig where T::AccountId: From + SomeAssociation1 + From, { @@ -583,7 +621,7 @@ pub mod pallet2 { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig + impl BuildGenesisConfig for GenesisConfig where T::AccountId: From + SomeAssociation1, { @@ -648,14 +686,13 @@ 
frame_support::parameter_types!( impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU32<250>; type BlockWeights = (); @@ -701,10 +738,7 @@ pub type UncheckedExtrinsic = sp_runtime::testing::TestXt>; frame_support::construct_runtime!( - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { // Exclude part `Storage` in order not to check its metadata in tests. System: frame_system exclude_parts { Pallet, Storage }, @@ -814,13 +848,10 @@ fn instance_expand() { #[test] fn inherent_expand() { - use frame_support::{ - inherent::{BlockT, InherentData}, - traits::EnsureInherentsAreFirst, - }; + use frame_support::{inherent::InherentData, traits::EnsureInherentsAreFirst}; use sp_core::Hasher; use sp_runtime::{ - traits::{BlakeTwo256, Header}, + traits::{BlakeTwo256, Block as _, Header}, Digest, }; @@ -1124,6 +1155,7 @@ fn storage_expand() { k.extend(2u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!(pallet::Pallet::::nmap2((1, 2)), Some(3u64)); pallet::NMap3::::insert((&1, &2), &3); let mut k = [twox_128(b"Example"), twox_128(b"NMap3")].concat(); @@ -1131,11 +1163,56 @@ fn storage_expand() { k.extend(2u16.using_encoded(twox_64_concat)); assert_eq!(unhashed::get::(&k), Some(3u128)); assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!(pallet::Pallet::::nmap3((1, 2)), Ok(3u128)); assert_eq!( pallet::NMap3::::get((2, 3)), Err(pallet::Error::::NonExistentStorageValue), ); + 
pallet::CountedNMap::::insert((&1,), &3); + let mut k = [twox_128(b"Example"), twox_128(b"CountedNMap")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!(pallet::CountedNMap::::count(), 1); + assert_eq!( + unhashed::get::( + &[twox_128(b"Example"), twox_128(b"CounterForCountedNMap")].concat() + ), + Some(1u32) + ); + + pallet::CountedNMap2::::insert((&1, &2), &3); + let mut k = [twox_128(b"Example"), twox_128(b"CountedNMap2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u64)); + assert_eq!(pallet::CountedNMap2::::count(), 1); + assert_eq!( + unhashed::get::( + &[twox_128(b"Example"), twox_128(b"CounterForCountedNMap2")].concat() + ), + Some(1u32) + ); + assert_eq!(pallet::Pallet::::counted_nmap2((1, 2)), Some(3u64)); + + pallet::CountedNMap3::::insert((&1, &2), &3); + let mut k = [twox_128(b"Example"), twox_128(b"CountedNMap3")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + k.extend(2u16.using_encoded(twox_64_concat)); + assert_eq!(pallet::CountedNMap3::::count(), 1); + assert_eq!(unhashed::get::(&k), Some(3u128)); + assert_eq!(pallet::Pallet::::counted_nmap3((1, 2)), Ok(3u128)); + assert_eq!( + pallet::CountedNMap3::::get((2, 3)), + Err(pallet::Error::::NonExistentStorageValue), + ); + assert_eq!( + unhashed::get::( + &[twox_128(b"Example"), twox_128(b"CounterForCountedNMap3")].concat() + ), + Some(1u32) + ); + #[cfg(feature = "frame-feature-testing")] { pallet::ConditionalValue::::put(1); @@ -1300,7 +1377,8 @@ fn migrate_from_pallet_version_to_storage_version() { #[test] fn metadata() { - use frame_support::metadata::*; + use codec::Decode; + use frame_support::metadata::{v15::*, *}; fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { if cfg!(feature = "no-metadata-docs") { @@ -1310,6 +1388,9 @@ fn metadata() { } } + let readme = "Support code for the 
runtime.\n\nLicense: Apache-2.0"; + let expected_pallet_doc = vec![" Pallet documentation", readme, readme]; + let pallets = vec![ PalletMetadata { index: 1, @@ -1452,6 +1533,66 @@ fn metadata() { default: vec![1, 1], docs: vec![], }, + StorageEntryMetadata { + name: "CountedNMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::(), + hashers: vec![StorageHasher::Blake2_128Concat], + value: meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "CounterForCountedNMap", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0, 0, 0, 0], + docs: maybe_docs(vec!["Counter for the related counted storage map"]), + }, + StorageEntryMetadata { + name: "CountedNMap2", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::<(u16, u32)>(), + hashers: vec![ + StorageHasher::Twox64Concat, + StorageHasher::Blake2_128Concat, + ], + value: meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "CounterForCountedNMap2", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0, 0, 0, 0], + docs: maybe_docs(vec!["Counter for the related counted storage map"]), + }, + StorageEntryMetadata { + name: "CountedNMap3", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::<(u8, u16)>(), + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat, + ], + value: meta_type::(), + }, + default: vec![1, 1], + docs: vec![], + }, + StorageEntryMetadata { + name: "CounterForCountedNMap3", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0, 0, 0, 0], + docs: maybe_docs(vec!["Counter for the related counted storage map"]), + }, #[cfg(feature = "frame-feature-testing")] StorageEntryMetadata { name: "ConditionalValue", @@ -1502,6 
+1643,29 @@ fn metadata() { default: vec![0], docs: vec![], }, + #[cfg(feature = "frame-feature-testing")] + StorageEntryMetadata { + name: "ConditionalCountedNMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::<(u8, u16)>(), + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat, + ], + value: meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + #[cfg(feature = "frame-feature-testing")] + StorageEntryMetadata { + name: "CounterForConditionalCountedNMap", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0, 0, 0, 0], + docs: maybe_docs(vec!["Counter for the related counted storage map"]), + }, StorageEntryMetadata { name: "RenamedCountedMap", modifier: StorageEntryModifier::Optional, @@ -1570,6 +1734,7 @@ fn metadata() { }, ], error: Some(PalletErrorMetadata { ty: meta_type::>() }), + docs: expected_pallet_doc, }, PalletMetadata { index: 2, @@ -1608,6 +1773,7 @@ fn metadata() { event: Some(PalletEventMetadata { ty: meta_type::() }), constants: vec![], error: None, + docs: vec![], }, #[cfg(feature = "frame-feature-testing")] PalletMetadata { @@ -1618,6 +1784,7 @@ fn metadata() { event: None, constants: vec![], error: None, + docs: vec![" Test that the supertrait check works when we pass some parameter to the `frame_system::Config`."], }, #[cfg(feature = "frame-feature-testing-2")] PalletMetadata { @@ -1628,6 +1795,7 @@ fn metadata() { event: None, constants: vec![], error: None, + docs: vec![" Test that the supertrait check works when we pass some parameter to the `frame_system::Config`."], }, ]; @@ -1642,24 +1810,48 @@ fn metadata() { } let extrinsic = ExtrinsicMetadata { - ty: meta_type::(), version: 4, signed_extensions: vec![SignedExtensionMetadata { identifier: "UnitSignedExtension", ty: meta_type::<()>(), additional_signed: meta_type::<()>(), }], + address_ty: meta_type::<<::SignaturePayload as 
SignaturePayloadT>::SignatureAddress>(), + call_ty: meta_type::<::Call>(), + signature_ty: meta_type::< + <::SignaturePayload as SignaturePayloadT>::Signature + >(), + extra_ty: meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureExtra>(), + }; + + let outer_enums = OuterEnums { + call_enum_ty: meta_type::(), + event_enum_ty: meta_type::(), + error_enum_ty: meta_type::(), }; - let expected_metadata: RuntimeMetadataPrefixed = - RuntimeMetadataLastVersion::new(pallets, extrinsic, meta_type::()).into(); + let expected_metadata: RuntimeMetadataPrefixed = RuntimeMetadataLastVersion::new( + pallets, + extrinsic, + meta_type::(), + vec![], + outer_enums, + CustomMetadata { map: Default::default() }, + ) + .into(); let expected_metadata = match expected_metadata.1 { - RuntimeMetadata::V14(metadata) => metadata, + RuntimeMetadata::V15(metadata) => metadata, _ => panic!("metadata has been bumped, test needs to be updated"), }; - let actual_metadata = match Runtime::metadata().1 { - RuntimeMetadata::V14(metadata) => metadata, + let bytes = &Runtime::metadata_at_version(LATEST_METADATA_VERSION) + .expect("Metadata must be present; qed"); + + let actual_metadata: RuntimeMetadataPrefixed = + Decode::decode(&mut &bytes[..]).expect("Metadata encoded properly; qed"); + + let actual_metadata = match actual_metadata.1 { + RuntimeMetadata::V15(metadata) => metadata, _ => panic!("metadata has been bumped, test needs to be updated"), }; @@ -1671,8 +1863,9 @@ fn metadata_at_version() { use frame_support::metadata::*; use sp_core::Decode; + // Metadata always returns the V14.3 let metadata = Runtime::metadata(); - let at_metadata = match Runtime::metadata_at_version(LATEST_METADATA_VERSION) { + let at_metadata = match Runtime::metadata_at_version(14) { Some(opaque) => { let bytes = &*opaque; let metadata: RuntimeMetadataPrefixed = Decode::decode(&mut &bytes[..]).unwrap(); @@ -1686,7 +1879,7 @@ fn metadata_at_version() { #[test] fn metadata_versions() { - 
assert_eq!(vec![LATEST_METADATA_VERSION, u32::MAX], Runtime::metadata_versions()); + assert_eq!(vec![14, LATEST_METADATA_VERSION], Runtime::metadata_versions()); } #[test] @@ -1703,6 +1896,28 @@ fn metadata_ir_pallet_runtime_docs() { assert_eq!(pallet.docs, expected); } +#[test] +fn extrinsic_metadata_ir_types() { + let ir = Runtime::metadata_ir().extrinsic; + + assert_eq!(meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureAddress>(), ir.address_ty); + assert_eq!(meta_type::(), ir.address_ty); + + assert_eq!(meta_type::<::Call>(), ir.call_ty); + assert_eq!(meta_type::(), ir.call_ty); + + assert_eq!( + meta_type::< + <::SignaturePayload as SignaturePayloadT>::Signature, + >(), + ir.signature_ty + ); + assert_eq!(meta_type::<()>(), ir.signature_ty); + + assert_eq!(meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureExtra>(), ir.extra_ty); + assert_eq!(meta_type::>(), ir.extra_ty); +} + #[test] fn test_pallet_runtime_docs() { let docs = crate::pallet::Pallet::::pallet_documentation_metadata(); @@ -1716,7 +1931,6 @@ fn test_pallet_info_access() { assert_eq!(::name(), "System"); assert_eq!(::name(), "Example"); assert_eq!(::name(), "Example2"); - assert_eq!(::index(), 0); assert_eq!(::index(), 1); assert_eq!(::index(), 2); @@ -1818,6 +2032,48 @@ fn test_storage_info() { max_values: None, max_size: Some(16 + 1 + 8 + 2 + 16), }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"CountedNMap".to_vec(), + prefix: prefix(b"Example", b"CountedNMap").to_vec(), + max_values: None, + max_size: Some(16 + 1 + 4), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"CounterForCountedNMap".to_vec(), + prefix: prefix(b"Example", b"CounterForCountedNMap").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"CountedNMap2".to_vec(), + prefix: prefix(b"Example", b"CountedNMap2").to_vec(), + max_values: Some(11), + max_size: Some(8 + 2 + 16 + 4 + 
8), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"CounterForCountedNMap2".to_vec(), + prefix: prefix(b"Example", b"CounterForCountedNMap2").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"CountedNMap3".to_vec(), + prefix: prefix(b"Example", b"CountedNMap3").to_vec(), + max_values: None, + max_size: Some(16 + 1 + 8 + 2 + 16), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"CounterForCountedNMap3".to_vec(), + prefix: prefix(b"Example", b"CounterForCountedNMap3").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, #[cfg(feature = "frame-feature-testing")] { StorageInfo { @@ -1858,6 +2114,26 @@ fn test_storage_info() { max_size: Some(16 + 1 + 8 + 2 + 4), } }, + #[cfg(feature = "frame-feature-testing")] + { + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"ConditionalCountedNMap".to_vec(), + prefix: prefix(b"Example", b"ConditionalCountedNMap").to_vec(), + max_values: None, + max_size: Some(16 + 1 + 8 + 2 + 4), + } + }, + #[cfg(feature = "frame-feature-testing")] + { + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"CounterForConditionalCountedNMap".to_vec(), + prefix: prefix(b"Example", b"CounterForConditionalCountedNMap").to_vec(), + max_values: Some(1), + max_size: Some(4), + } + }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"RenamedCountedMap".to_vec(), @@ -2110,9 +2386,11 @@ fn post_runtime_upgrade_detects_storage_version_issues() { // Call `on_genesis` to put the storage version of `Example` into the storage. Example::on_genesis(); // The version isn't changed, we should detect it. 
- assert!(Executive::try_runtime_upgrade(UpgradeCheckSelect::PreAndPost) - .unwrap_err() - .contains("On chain and current storage version do not match")); + assert!( + Executive::try_runtime_upgrade(UpgradeCheckSelect::PreAndPost).unwrap_err() == + "On chain and current storage version do not match. Missing runtime upgrade?" + .into() + ); }); TestExternalities::default().execute_with(|| { @@ -2138,9 +2416,12 @@ fn post_runtime_upgrade_detects_storage_version_issues() { // `CustomUpgradePallet4` will set a storage version for `Example4` while this doesn't has // any storage version "enabled". - assert!(ExecutiveWithUpgradePallet4::try_runtime_upgrade(UpgradeCheckSelect::PreAndPost) - .unwrap_err() - .contains("On chain storage version set, while the pallet doesn't")); + assert!( + ExecutiveWithUpgradePallet4::try_runtime_upgrade(UpgradeCheckSelect::PreAndPost) + .unwrap_err() == "On chain storage version set, while the pallet \ + doesn't have the `#[pallet::storage_version(VERSION)]` attribute." 
+ .into() + ); }); } diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 0747753289af0..4ecb9bcb58ae9 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -185,13 +185,15 @@ pub mod pallet { >; #[pallet::genesis_config] - #[derive(Default)] - pub struct GenesisConfig { + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig, I: 'static = ()> { + #[serde(skip)] + _config: sp_std::marker::PhantomData<(T, I)>, _myfield: u32, } #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { + impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) {} } @@ -281,7 +283,7 @@ pub mod pallet2 { } #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { + impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) {} } } @@ -289,14 +291,13 @@ pub mod pallet2 { impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU32<250>; type BlockWeights = (); @@ -334,10 +335,7 @@ pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { // Exclude part `Storage` in order not to check its metadata in tests. 
System: frame_system exclude_parts { Storage }, @@ -419,6 +417,39 @@ fn error_expand() { ); } +#[test] +fn module_error_outer_enum_expand() { + // assert that all variants of the Example pallet are included into the + // RuntimeError definition. + match RuntimeError::Example(pallet::Error::InsufficientProposersBalance) { + RuntimeError::Example(example) => match example { + pallet::Error::InsufficientProposersBalance => (), + pallet::Error::NonExistentStorageValue => (), + // Extra pattern added by `construct_runtime`. + pallet::Error::__Ignore(_, _) => (), + }, + _ => (), + }; +} + +#[test] +fn module_error_from_dispatch_error() { + let dispatch_err = DispatchError::Module(ModuleError { + index: 1, + error: [0; 4], + message: Some("InsufficientProposersBalance"), + }); + let err = RuntimeError::from_dispatch_error(dispatch_err).unwrap(); + + match err { + RuntimeError::Example(pallet::Error::InsufficientProposersBalance) => (), + _ => panic!("Module error constructed incorrectly"), + }; + + // Only `ModuleError` is converted. + assert!(RuntimeError::from_dispatch_error(DispatchError::BadOrigin).is_none()); +} + #[test] fn instance_expand() { // assert same type @@ -690,7 +721,7 @@ fn pallet_on_genesis() { #[test] fn metadata() { - use frame_support::metadata::*; + use frame_support::metadata::{v14::*, *}; let system_pallet_metadata = PalletMetadata { index: 0, diff --git a/frame/support/test/tests/pallet_outer_enums_explicit.rs b/frame/support/test/tests/pallet_outer_enums_explicit.rs new file mode 100644 index 0000000000000..a8250f8b15325 --- /dev/null +++ b/frame/support/test/tests/pallet_outer_enums_explicit.rs @@ -0,0 +1,122 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::{derive_impl, traits::ConstU32}; + +mod common; + +use common::outer_enums::{pallet, pallet2}; + +pub type Header = sp_runtime::generic::Header; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type Block = Block; + type BlockHashCount = ConstU32<10>; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type PalletInfo = PalletInfo; + type OnSetCode = (); +} + +impl common::outer_enums::pallet::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl common::outer_enums::pallet::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl common::outer_enums::pallet2::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl common::outer_enums::pallet2::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl common::outer_enums::pallet3::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl common::outer_enums::pallet3::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + +frame_support::construct_runtime!( + pub struct Runtime + { + // Exclude part `Storage` in order not to check its metadata in tests. + System: frame_system::{Pallet, Config, Call, Event }, + + // This pallet exposes the Error type explicitly. 
+ Example: common::outer_enums::pallet::{Pallet, Config, Event, Error}, + Instance1Example: common::outer_enums::pallet::::{ Pallet, Config, Event }, + + // This pallet does not mention the Error type, but it must be propagated (similarly to the polkadot/kusama). + Example2: common::outer_enums::pallet2::{Pallet, Config, Event }, + Instance1Example2: common::outer_enums::pallet2::::{Pallet, Config, Event}, + + // This pallet does not declare any errors. + Example3: common::outer_enums::pallet3::{Pallet, Config, Event}, + Instance1Example3: common::outer_enums::pallet3::::{Pallet, Config, Event}, + } +); + +#[test] +fn module_error_outer_enum_expand_explicit() { + // The Runtime has *all* parts explicitly defined. + + // Check that all error types are propagated + match RuntimeError::Example(pallet::Error::InsufficientProposersBalance) { + // Error passed implicitely to the pallet system. + RuntimeError::System(system) => match system { + frame_system::Error::InvalidSpecName => (), + frame_system::Error::SpecVersionNeedsToIncrease => (), + frame_system::Error::FailedToExtractRuntimeVersion => (), + frame_system::Error::NonDefaultComposite => (), + frame_system::Error::NonZeroRefCount => (), + frame_system::Error::CallFiltered => (), + frame_system::Error::__Ignore(_, _) => (), + }, + + // Error declared explicitly. + RuntimeError::Example(example) => match example { + pallet::Error::InsufficientProposersBalance => (), + pallet::Error::NonExistentStorageValue => (), + pallet::Error::__Ignore(_, _) => (), + }, + // Error declared explicitly. + RuntimeError::Instance1Example(example) => match example { + pallet::Error::InsufficientProposersBalance => (), + pallet::Error::NonExistentStorageValue => (), + pallet::Error::__Ignore(_, _) => (), + }, + + // Error must propagate even if not defined explicitly as pallet part. 
+ RuntimeError::Example2(example) => match example { + pallet2::Error::OtherInsufficientProposersBalance => (), + pallet2::Error::OtherNonExistentStorageValue => (), + pallet2::Error::__Ignore(_, _) => (), + }, + // Error must propagate even if not defined explicitly as pallet part. + RuntimeError::Instance1Example2(example) => match example { + pallet2::Error::OtherInsufficientProposersBalance => (), + pallet2::Error::OtherNonExistentStorageValue => (), + pallet2::Error::__Ignore(_, _) => (), + }, + }; +} diff --git a/frame/support/test/tests/pallet_outer_enums_implicit.rs b/frame/support/test/tests/pallet_outer_enums_implicit.rs new file mode 100644 index 0000000000000..191f095f5d78d --- /dev/null +++ b/frame/support/test/tests/pallet_outer_enums_implicit.rs @@ -0,0 +1,122 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use frame_support::{derive_impl, traits::ConstU32}; + +mod common; + +use common::outer_enums::{pallet, pallet2}; + +pub type Header = sp_runtime::generic::Header; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type Block = Block; + type BlockHashCount = ConstU32<10>; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type PalletInfo = PalletInfo; + type OnSetCode = (); +} + +impl common::outer_enums::pallet::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl common::outer_enums::pallet::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl common::outer_enums::pallet2::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl common::outer_enums::pallet2::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl common::outer_enums::pallet3::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl common::outer_enums::pallet3::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + +frame_support::construct_runtime!( + pub struct Runtime + { + // Exclude part `Storage` in order not to check its metadata in tests. + System: frame_system exclude_parts { Storage }, + + // Pallet exposes `Error` implicitely. + Example: common::outer_enums::pallet, + Instance1Example: common::outer_enums::pallet::, + + // Pallet exposes `Error` implicitely. + Example2: common::outer_enums::pallet2, + Instance1Example2: common::outer_enums::pallet2::, + + // Pallet does not implement error. + Example3: common::outer_enums::pallet3, + Instance1Example3: common::outer_enums::pallet3::, + } +); + +#[test] +fn module_error_outer_enum_expand_implicit() { + // The Runtime has *all* parts implicitly defined. 
+ + // Check that all error types are propagated + match RuntimeError::Example(pallet::Error::InsufficientProposersBalance) { + // Error passed implicitely to the pallet system. + RuntimeError::System(system) => match system { + frame_system::Error::InvalidSpecName => (), + frame_system::Error::SpecVersionNeedsToIncrease => (), + frame_system::Error::FailedToExtractRuntimeVersion => (), + frame_system::Error::NonDefaultComposite => (), + frame_system::Error::NonZeroRefCount => (), + frame_system::Error::CallFiltered => (), + frame_system::Error::__Ignore(_, _) => (), + }, + + // Error declared explicitly. + RuntimeError::Example(example) => match example { + pallet::Error::InsufficientProposersBalance => (), + pallet::Error::NonExistentStorageValue => (), + pallet::Error::__Ignore(_, _) => (), + }, + // Error declared explicitly. + RuntimeError::Instance1Example(example) => match example { + pallet::Error::InsufficientProposersBalance => (), + pallet::Error::NonExistentStorageValue => (), + pallet::Error::__Ignore(_, _) => (), + }, + + // Error must propagate even if not defined explicitly as pallet part. + RuntimeError::Example2(example) => match example { + pallet2::Error::OtherInsufficientProposersBalance => (), + pallet2::Error::OtherNonExistentStorageValue => (), + pallet2::Error::__Ignore(_, _) => (), + }, + // Error must propagate even if not defined explicitly as pallet part. 
+ RuntimeError::Instance1Example2(example) => match example { + pallet2::Error::OtherInsufficientProposersBalance => (), + pallet2::Error::OtherNonExistentStorageValue => (), + pallet2::Error::__Ignore(_, _) => (), + }, + }; +} diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index 1b084dd2f76bc..4cbed3709626c 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -23,5 +23,6 @@ error[E0277]: `Bar` doesn't implement `std::fmt::Debug` = note: required for the cast from `&Bar` to the object type `dyn std::fmt::Debug` help: consider annotating `Bar` with `#[derive(Debug)]` | -17 | #[derive(Debug)] +17 + #[derive(Debug)] +18 | struct Bar; | diff --git a/frame/support/test/tests/pallet_ui/default_config_with_no_default_in_system.rs b/frame/support/test/tests/pallet_ui/default_config_with_no_default_in_system.rs new file mode 100644 index 0000000000000..7127ecf78594e --- /dev/null +++ b/frame/support/test/tests/pallet_ui/default_config_with_no_default_in_system.rs @@ -0,0 +1,15 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + + #[pallet::config(with_default)] + pub trait Config: frame_system::Config { + #[pallet::constant] + type MyGetParam2: Get; + } + + #[pallet::pallet] + pub struct Pallet(_); +} + +fn main() {} diff --git a/frame/support/test/tests/pallet_ui/default_config_with_no_default_in_system.stderr b/frame/support/test/tests/pallet_ui/default_config_with_no_default_in_system.stderr new file mode 100644 index 0000000000000..bc657f8f654ec --- /dev/null +++ b/frame/support/test/tests/pallet_ui/default_config_with_no_default_in_system.stderr @@ -0,0 +1,5 @@ +error[E0220]: associated type `RuntimeCall` not found for `Self` + --> tests/pallet_ui/default_config_with_no_default_in_system.rs:8:31 + | +8 | type 
MyGetParam2: Get; + | ^^^^^^^^^^^ associated type `RuntimeCall` not found diff --git a/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr b/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr index bbc4743fc1003..5d2734b4db601 100644 --- a/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr +++ b/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr @@ -1,5 +1,5 @@ error: use of deprecated struct `pallet::_::Store`: - Use of `#[pallet::generate_store(pub(super) trait Store)]` will be removed soon. + Use of `#[pallet::generate_store(pub(super) trait Store)]` will be removed after July 2023. Check https://github.com/paritytech/substrate/pull/13535 for more details. --> tests/pallet_ui/deprecated_store_attr.rs:7:3 | diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_call_index.rs b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_call_index.rs new file mode 100644 index 0000000000000..1920b6799de25 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_call_index.rs @@ -0,0 +1,31 @@ +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::OriginFor; + + // The struct on which we build all of our Pallet logic. + #[pallet::pallet] + pub struct Pallet(_); + + // Your Pallet's configuration trait, representing custom external types and interfaces. + #[pallet::config] + pub trait Config: frame_system::Config {} + + // Your Pallet's callable functions. + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub fn my_call(_origin: OriginFor) -> DispatchResult { + Ok(()) + } + } + + // Your Pallet's internal functions. 
+ impl Pallet {} +} + +fn main() {} diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_call_index.stderr b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_call_index.stderr new file mode 100644 index 0000000000000..b75edff1ab5f3 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_call_index.stderr @@ -0,0 +1,24 @@ +error: use of deprecated constant `pallet::warnings::ImplicitCallIndex_0::_w`: + It is deprecated to use implicit call indices. + Please instead ensure that all calls have a `pallet::call_index` attribute or put the pallet into `dev` mode. + + For more info see: + + + --> tests/pallet_ui/dev_mode_without_arg_call_index.rs:22:10 + | +22 | pub fn my_call(_origin: OriginFor) -> DispatchResult { + | ^^^^^^^ + | + = note: `-D deprecated` implied by `-D warnings` + +error: use of deprecated constant `pallet::warnings::ConstantWeight_0::_w`: + It is deprecated to use hard-coded constant as call weight. + Please instead benchmark all calls or put the pallet into `dev` mode. 
+ + For more info see: + + --> tests/pallet_ui/dev_mode_without_arg_call_index.rs:21:20 + | +21 | #[pallet::weight(0)] + | ^ diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.rs b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.rs index da5e8d0c4da52..a4a0eb832c9c0 100644 --- a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.rs +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.rs @@ -1,6 +1,6 @@ #[frame_support::pallet] mod pallet { - use frame_support::pallet_prelude::{Hooks, GenesisBuild}; + use frame_support::pallet_prelude::{BuildGenesisConfig, Hooks}; use frame_system::pallet_prelude::BlockNumberFor; #[pallet::config] @@ -19,7 +19,7 @@ mod pallet { pub struct GenesisConfig; #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig {} + impl BuildGenesisConfig for GenesisConfig {} } fn main() { diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr index 22b5ce9412ce7..7245333c9842e 100644 --- a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr @@ -1,15 +1,16 @@ error[E0277]: the trait bound `pallet::GenesisConfig: std::default::Default` is not satisfied - --> tests/pallet_ui/genesis_default_not_satisfied.rs:22:38 + --> tests/pallet_ui/genesis_default_not_satisfied.rs:22:30 | -22 | impl GenesisBuild for GenesisConfig {} - | ^^^^^^^^^^^^^ the trait `std::default::Default` is not implemented for `pallet::GenesisConfig` +22 | impl BuildGenesisConfig for GenesisConfig {} + | ^^^^^^^^^^^^^ the trait `std::default::Default` is not implemented for `pallet::GenesisConfig` | -note: required by a bound in `GenesisBuild` +note: required by a bound in `BuildGenesisConfig` --> $WORKSPACE/frame/support/src/traits/hooks.rs | - | pub trait GenesisBuild: Default + 
sp_runtime::traits::MaybeSerializeDeserialize { - | ^^^^^^^ required by this bound in `GenesisBuild` + | pub trait BuildGenesisConfig: Default + sp_runtime::traits::MaybeSerializeDeserialize { + | ^^^^^^^ required by this bound in `BuildGenesisConfig` help: consider annotating `pallet::GenesisConfig` with `#[derive(Default)]` | -19 | #[derive(Default)] +19 + #[derive(Default)] +20 | pub struct GenesisConfig; | diff --git a/frame/support/test/tests/pallet_ui/no_default_but_missing_with_default.rs b/frame/support/test/tests/pallet_ui/no_default_but_missing_with_default.rs new file mode 100644 index 0000000000000..5ffa13c22243d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/no_default_but_missing_with_default.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + #[pallet::constant] + #[pallet::no_default] + type MyGetParam2: Get; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() {} diff --git a/frame/support/test/tests/pallet_ui/no_default_but_missing_with_default.stderr b/frame/support/test/tests/pallet_ui/no_default_but_missing_with_default.stderr new file mode 100644 index 0000000000000..aebde115eb80e --- /dev/null +++ b/frame/support/test/tests/pallet_ui/no_default_but_missing_with_default.stderr @@ -0,0 +1,5 @@ +error: `#[pallet:no_default]` can only be used if `#[pallet::config(with_default)]` has been specified + --> tests/pallet_ui/no_default_but_missing_with_default.rs:9:4 + | +9 | #[pallet::no_default] + | ^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/pallet_doc_arg_non_path.rs b/frame/support/test/tests/pallet_ui/pallet_doc_arg_non_path.rs index ef3097d23007d..32df5d6183653 100644 --- a/frame/support/test/tests/pallet_ui/pallet_doc_arg_non_path.rs +++ 
b/frame/support/test/tests/pallet_ui/pallet_doc_arg_non_path.rs @@ -5,7 +5,7 @@ mod pallet { #[pallet::config] pub trait Config: frame_system::Config where - ::Index: From, + ::Nonce: From, { } diff --git a/frame/support/test/tests/pallet_ui/pallet_doc_empty.rs b/frame/support/test/tests/pallet_ui/pallet_doc_empty.rs index fe40806d2fa75..6ff01e9fb44b8 100644 --- a/frame/support/test/tests/pallet_ui/pallet_doc_empty.rs +++ b/frame/support/test/tests/pallet_ui/pallet_doc_empty.rs @@ -5,7 +5,7 @@ mod pallet { #[pallet::config] pub trait Config: frame_system::Config where - ::Index: From, + ::Nonce: From, { } diff --git a/frame/support/test/tests/pallet_ui/pallet_doc_invalid_arg.rs b/frame/support/test/tests/pallet_ui/pallet_doc_invalid_arg.rs index 8f0ccb3777a49..c7d3b556a08e2 100644 --- a/frame/support/test/tests/pallet_ui/pallet_doc_invalid_arg.rs +++ b/frame/support/test/tests/pallet_ui/pallet_doc_invalid_arg.rs @@ -5,7 +5,7 @@ mod pallet { #[pallet::config] pub trait Config: frame_system::Config where - ::Index: From, + ::Nonce: From, { } diff --git a/frame/support/test/tests/pallet_ui/pallet_doc_multiple_args.rs b/frame/support/test/tests/pallet_ui/pallet_doc_multiple_args.rs index ffbed9d950799..a799879fe4442 100644 --- a/frame/support/test/tests/pallet_ui/pallet_doc_multiple_args.rs +++ b/frame/support/test/tests/pallet_ui/pallet_doc_multiple_args.rs @@ -5,7 +5,7 @@ mod pallet { #[pallet::config] pub trait Config: frame_system::Config where - ::Index: From, + ::Nonce: From, { } diff --git a/frame/support/test/tests/pallet_ui/pass/default_config.rs b/frame/support/test/tests/pallet_ui/pass/default_config.rs new file mode 100644 index 0000000000000..9f90ae67d5779 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/pass/default_config.rs @@ -0,0 +1,15 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + + #[pallet::config(with_default)] + pub trait Config: frame_system::Config { + #[pallet::constant] + type MyGetParam2: Get; + } 
+ + #[pallet::pallet] + pub struct Pallet(_); +} + +fn main() {} diff --git a/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs b/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs index 28b901213943c..ed779da80a188 100644 --- a/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs +++ b/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs @@ -1,10 +1,6 @@ #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::{ - traits::{ - ConstU32, - }, -}; +use frame_support::traits::ConstU32; pub use pallet::*; @@ -60,14 +56,13 @@ pub mod pallet { impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU32<250>; type BlockWeights = (); @@ -89,10 +84,7 @@ pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { // Exclude part `Storage` in order not to check its metadata in tests. 
System: frame_system exclude_parts { Pallet, Storage }, @@ -100,17 +92,15 @@ frame_support::construct_runtime!( } ); -impl pallet::Config for Runtime { - -} +impl pallet::Config for Runtime {} fn main() { - use frame_support::{pallet_prelude::*}; - use storage::unhashed; + use frame_support::pallet_prelude::*; use sp_io::{ hashing::{blake2_128, twox_128}, TestExternalities, }; + use storage::unhashed; fn blake2_128_concat(d: &[u8]) -> Vec { let mut v = blake2_128(d).to_vec(); diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs b/frame/support/test/tests/pallet_ui/pass/no_std_genesis_config.rs similarity index 81% rename from frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs rename to frame/support/test/tests/pallet_ui/pass/no_std_genesis_config.rs index 9c9c49c4b2740..87659a0bab513 100644 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs +++ b/frame/support/test/tests/pallet_ui/pass/no_std_genesis_config.rs @@ -1,6 +1,6 @@ use frame_support::construct_runtime; -use sp_runtime::{generic, traits::BlakeTwo256}; use sp_core::sr25519; +use sp_runtime::{generic, traits::BlakeTwo256}; pub type Signature = sr25519::Signature; pub type BlockNumber = u32; @@ -13,14 +13,13 @@ impl test_pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); @@ -38,13 +37,10 @@ impl frame_system::Config for Runtime { } construct_runtime! 
{ - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub struct Runtime { - System: frame_system::{Pallet, Call, Storage, Config, Event}, - Pallet: test_pallet::{Pallet, Config}, + System: frame_system::{Pallet, Call, Storage, Config, Event}, + Pallet: test_pallet::{Pallet, Config}, } } diff --git a/frame/support/test/tests/pallet_ui/pass/where_clause_missing_hooks.rs b/frame/support/test/tests/pallet_ui/pass/where_clause_missing_hooks.rs index bf5f22306207a..15fff372a1dd1 100644 --- a/frame/support/test/tests/pallet_ui/pass/where_clause_missing_hooks.rs +++ b/frame/support/test/tests/pallet_ui/pass/where_clause_missing_hooks.rs @@ -1,17 +1,24 @@ #[frame_support::pallet] mod pallet { #[pallet::config] - pub trait Config: frame_system::Config where ::Index: From {} + pub trait Config: frame_system::Config + where + ::Nonce: From, + { + } #[pallet::pallet] pub struct Pallet(core::marker::PhantomData); #[pallet::call] - impl Pallet where ::Index: From {} + impl Pallet where ::Nonce: From {} - impl Pallet where ::Index: From { + impl Pallet + where + ::Nonce: From, + { fn foo(x: u128) { - let _index = ::Index::from(x); + let _index = ::Nonce::from(x); } } } diff --git a/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr index 223e9cfa3e9f8..3358f00151d50 100644 --- a/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr +++ b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr @@ -1,4 +1,4 @@ -error: Invalid pallet::storage, expected ident: `StorageValue` or `StorageMap` or `CountedStorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, found `u8`. +error: Invalid pallet::storage, expected ident: `StorageValue` or `StorageMap` or `CountedStorageMap` or `StorageDoubleMap` or `StorageNMap` or `CountedStorageNMap` in order to expand metadata, found `u8`. 
--> $DIR/storage_not_storage_type.rs:19:16 | 19 | type Foo = u8; diff --git a/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr b/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr index 24fda4ff1abbf..a8836bc048231 100644 --- a/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr +++ b/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr @@ -1,5 +1,5 @@ error: use of deprecated struct `pallet::_::Store`: - Use of `#[pallet::generate_store(pub(super) trait Store)]` will be removed soon. + Use of `#[pallet::generate_store(pub(super) trait Store)]` will be removed after July 2023. Check https://github.com/paritytech/substrate/pull/13535 for more details. --> tests/pallet_ui/store_trait_leak_private.rs:11:3 | diff --git a/frame/support/test/tests/pallet_ui/trait_invalid_item.stderr b/frame/support/test/tests/pallet_ui/trait_invalid_item.stderr index 72495d94b3079..e3409a819114a 100644 --- a/frame/support/test/tests/pallet_ui/trait_invalid_item.stderr +++ b/frame/support/test/tests/pallet_ui/trait_invalid_item.stderr @@ -1,5 +1,5 @@ -error: Invalid pallet::constant in pallet::config, expected type trait item - --> $DIR/trait_invalid_item.rs:9:3 +error: Invalid #[pallet::constant] in #[pallet::config], expected type item + --> tests/pallet_ui/trait_invalid_item.rs:9:3 | 9 | const U: u8 = 3; | ^^^^^ diff --git a/frame/support/test/tests/pallet_ui/trait_item_duplicate_constant_attr.rs b/frame/support/test/tests/pallet_ui/trait_item_duplicate_constant_attr.rs new file mode 100644 index 0000000000000..8f3d9f3f3e2f9 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_item_duplicate_constant_attr.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + #[pallet::constant] + #[pallet::constant] + type MyGetParam2: Get; + } + + #[pallet::pallet] + pub struct 
Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() {} diff --git a/frame/support/test/tests/pallet_ui/trait_item_duplicate_constant_attr.stderr b/frame/support/test/tests/pallet_ui/trait_item_duplicate_constant_attr.stderr new file mode 100644 index 0000000000000..3679b67f07b53 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_item_duplicate_constant_attr.stderr @@ -0,0 +1,5 @@ +error: Duplicate #[pallet::constant] attribute not allowed. + --> tests/pallet_ui/trait_item_duplicate_constant_attr.rs:9:4 + | +9 | #[pallet::constant] + | ^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/trait_item_duplicate_no_default.rs b/frame/support/test/tests/pallet_ui/trait_item_duplicate_no_default.rs new file mode 100644 index 0000000000000..d2040ec74dc4e --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_item_duplicate_no_default.rs @@ -0,0 +1,24 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config(with_default)] + pub trait Config: frame_system::Config { + #[pallet::constant] + #[pallet::no_default] + #[pallet::no_default] + type MyGetParam2: Get; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() {} diff --git a/frame/support/test/tests/pallet_ui/trait_item_duplicate_no_default.stderr b/frame/support/test/tests/pallet_ui/trait_item_duplicate_no_default.stderr new file mode 100644 index 0000000000000..77a29c394d62d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_item_duplicate_no_default.stderr @@ -0,0 +1,5 @@ +error: Duplicate #[pallet::no_default] attribute not allowed. 
+ --> tests/pallet_ui/trait_item_duplicate_no_default.rs:10:4 + | +10 | #[pallet::no_default] + | ^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/runtime_metadata.rs b/frame/support/test/tests/runtime_metadata.rs index 70ca307d4428c..50bad87d32eb0 100644 --- a/frame/support/test/tests/runtime_metadata.rs +++ b/frame/support/test/tests/runtime_metadata.rs @@ -25,7 +25,6 @@ use scale_info::{form::MetaForm, meta_type}; use sp_runtime::traits::Block as BlockT; pub type BlockNumber = u64; -pub type Index = u64; pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; @@ -36,14 +35,13 @@ impl frame_system::Config for Runtime { type DbWeight = (); type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u32; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU32<250>; type Version = (); @@ -58,10 +56,7 @@ impl frame_system::Config for Runtime { } frame_support::construct_runtime!( - pub enum Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub enum Runtime { System: frame_system, } diff --git a/frame/support/test/tests/split_ui.rs b/frame/support/test/tests/split_ui.rs new file mode 100644 index 0000000000000..14f99b8ecdab1 --- /dev/null +++ b/frame/support/test/tests/split_ui.rs @@ -0,0 +1,36 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[rustversion::attr(not(stable), ignore)] +#[cfg(not(feature = "disable-ui-tests"))] +#[test] +fn split_ui() { + // Only run the ui tests when `RUN_UI_TESTS` is set. + if std::env::var("RUN_UI_TESTS").is_err() { + return + } + + // As trybuild is using `cargo check`, we don't need the real WASM binaries. + std::env::set_var("SKIP_WASM_BUILD", "1"); + + // Deny all warnings since we emit warnings as part of a Pallet's UI. + std::env::set_var("RUSTFLAGS", "--deny warnings"); + + let t = trybuild::TestCases::new(); + t.compile_fail("tests/split_ui/*.rs"); + t.pass("tests/split_ui/pass/*.rs"); +} diff --git a/frame/support/test/tests/split_ui/import_without_pallet.rs b/frame/support/test/tests/split_ui/import_without_pallet.rs new file mode 100644 index 0000000000000..874a92e461098 --- /dev/null +++ b/frame/support/test/tests/split_ui/import_without_pallet.rs @@ -0,0 +1,17 @@ +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::pallet_macros::*; + +#[pallet_section] +mod storages { + #[pallet::storage] + pub type MyStorageMap = StorageMap<_, _, u32, u64>; +} + +#[import_section(storages)] +pub mod pallet { + +} + +fn main() { +} diff --git a/frame/support/test/tests/split_ui/import_without_pallet.stderr b/frame/support/test/tests/split_ui/import_without_pallet.stderr new file mode 100644 index 0000000000000..0d7b5414b1016 --- /dev/null +++ b/frame/support/test/tests/split_ui/import_without_pallet.stderr @@ -0,0 +1,5 @@ +error: `#[import_section]` can only be applied to a valid pallet module + --> 
tests/split_ui/import_without_pallet.rs:12:9 + | +12 | pub mod pallet { + | ^^^^^^ diff --git a/frame/support/test/tests/split_ui/no_section_found.rs b/frame/support/test/tests/split_ui/no_section_found.rs new file mode 100644 index 0000000000000..fe12c6dc51b72 --- /dev/null +++ b/frame/support/test/tests/split_ui/no_section_found.rs @@ -0,0 +1,29 @@ +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::pallet_macros::*; + +pub use pallet::*; + +#[import_section(storages_dev)] +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::call] + impl Pallet { + pub fn my_call(_origin: OriginFor) -> DispatchResult { + MyStorageMap::::insert(1, 2); + Ok(()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/split_ui/no_section_found.stderr b/frame/support/test/tests/split_ui/no_section_found.stderr new file mode 100644 index 0000000000000..e0a9322b188e3 --- /dev/null +++ b/frame/support/test/tests/split_ui/no_section_found.stderr @@ -0,0 +1,13 @@ +error[E0432]: unresolved import `pallet` + --> tests/split_ui/no_section_found.rs:5:9 + | +5 | pub use pallet::*; + | ^^^^^^ help: a similar path exists: `test_pallet::pallet` + +error: cannot find macro `__export_tokens_tt_storages_dev` in this scope + --> tests/split_ui/no_section_found.rs:7:1 + | +7 | #[import_section(storages_dev)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: this error originates in the macro `frame_support::macro_magic::forward_tokens` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/split_ui/pass/split_valid.rs b/frame/support/test/tests/split_ui/pass/split_valid.rs new file mode 100644 index 0000000000000..8b5839ecd28a0 --- /dev/null +++ b/frame/support/test/tests/split_ui/pass/split_valid.rs @@ -0,0 +1,40 @@ 
+#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::pallet_macros::*; + +pub use pallet::*; + +#[pallet_section] +mod events { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + SomethingDone, + } +} + +#[import_section(events)] +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::call] + impl Pallet { + pub fn my_call(_origin: OriginFor) -> DispatchResult { + Self::deposit_event(Event::SomethingDone); + Ok(()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/split_ui/pass/split_valid_disambiguation.rs b/frame/support/test/tests/split_ui/pass/split_valid_disambiguation.rs new file mode 100644 index 0000000000000..8d8d50422e9ce --- /dev/null +++ b/frame/support/test/tests/split_ui/pass/split_valid_disambiguation.rs @@ -0,0 +1,61 @@ +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::pallet_macros::*; + +pub use pallet::*; + +mod first { + use super::*; + + #[pallet_section] + mod section { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + SomethingDone, + } + } +} + +mod second { + use super::*; + + #[pallet_section(section2)] + mod section { + #[pallet::error] + pub enum Error { + NoneValue, + } + } +} + +#[import_section(first::section)] +#[import_section(second::section2)] +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::call] + impl Pallet { + pub fn my_call(_origin: OriginFor) -> DispatchResult { + 
Self::deposit_event(Event::SomethingDone); + Ok(()) + } + + pub fn my_call_2(_origin: OriginFor) -> DispatchResult { + return Err(Error::::NoneValue.into()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/split_ui/section_not_imported.rs b/frame/support/test/tests/split_ui/section_not_imported.rs new file mode 100644 index 0000000000000..bcabf66256771 --- /dev/null +++ b/frame/support/test/tests/split_ui/section_not_imported.rs @@ -0,0 +1,34 @@ +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::pallet_macros::*; + +pub use pallet::*; + +#[pallet_section] +mod storages { + #[pallet::storage] + pub type MyStorageMap = StorageMap<_, _, u32, u64>; +} + +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::call] + impl Pallet { + pub fn my_call(_origin: OriginFor) -> DispatchResult { + MyStorageMap::::insert(1, 2); + Ok(()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/split_ui/section_not_imported.stderr b/frame/support/test/tests/split_ui/section_not_imported.stderr new file mode 100644 index 0000000000000..41ac2a5f58d25 --- /dev/null +++ b/frame/support/test/tests/split_ui/section_not_imported.stderr @@ -0,0 +1,8 @@ +error[E0433]: failed to resolve: use of undeclared type `MyStorageMap` + --> tests/split_ui/section_not_imported.rs:27:4 + | +27 | MyStorageMap::::insert(1, 2); + | ^^^^^^^^^^^^ + | | + | use of undeclared type `MyStorageMap` + | help: a struct with a similar name exists: `StorageMap` diff --git a/frame/support/test/tests/storage_layers.rs b/frame/support/test/tests/storage_layers.rs index 3e306834869bb..b825c85f9564c 100644 --- a/frame/support/test/tests/storage_layers.rs +++ b/frame/support/test/tests/storage_layers.rs @@ -58,7 +58,7 @@ pub mod pallet { } pub type BlockNumber = u32; -pub type 
Index = u64; +pub type Nonce = u64; pub type AccountId = u64; pub type Header = sp_runtime::generic::Header; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; @@ -70,13 +70,12 @@ impl frame_system::Config for Runtime { type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = Index; - type BlockNumber = BlockNumber; + type Nonce = Nonce; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU32<250>; type DbWeight = (); @@ -94,12 +93,7 @@ impl frame_system::Config for Runtime { impl Config for Runtime {} frame_support::construct_runtime!( - pub struct Runtime - where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { + pub struct Runtime { System: frame_system, MyPallet: pallet, } diff --git a/frame/support/test/tests/storage_transaction.rs b/frame/support/test/tests/storage_transaction.rs index 5fc4ba7cca6d9..c477433086098 100644 --- a/frame/support/test/tests/storage_transaction.rs +++ b/frame/support/test/tests/storage_transaction.rs @@ -19,12 +19,12 @@ #![allow(deprecated)] use frame_support::{ - assert_noop, assert_ok, assert_storage_noop, + assert_noop, assert_ok, assert_storage_noop, derive_impl, dispatch::DispatchResult, storage::{with_transaction, TransactionOutcome::*}, transactional, }; -use sp_core::sr25519; +use sp_core::{sr25519, ConstU32}; use sp_io::TestExternalities; use sp_runtime::{ generic, @@ -36,10 +36,9 @@ pub use self::pallet::*; #[frame_support::pallet] pub mod pallet { - use self::frame_system::pallet_prelude::*; use super::*; use frame_support::pallet_prelude::*; - use frame_support_test as frame_system; + use frame_system::pallet_prelude::*; #[pallet::pallet] #[pallet::generate_store(pub (super) trait Store)] @@ -82,25 
+81,23 @@ pub type Block = generic::Block; frame_support::construct_runtime!( pub enum Runtime - where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + { - System: frame_support_test, + System: frame_system, MyPallet: pallet, } ); -impl frame_support_test::Config for Runtime { - type BlockNumber = BlockNumber; - type AccountId = AccountId; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; + type Block = Block; + type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; type PalletInfo = PalletInfo; - type DbWeight = (); + type OnSetCode = (); } impl Config for Runtime {} diff --git a/frame/support/test/tests/versioned_runtime_upgrade.rs b/frame/support/test/tests/versioned_runtime_upgrade.rs new file mode 100644 index 0000000000000..93d87df8ca185 --- /dev/null +++ b/frame/support/test/tests/versioned_runtime_upgrade.rs @@ -0,0 +1,230 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Tests for VersionedRuntimeUpgrade + +#![cfg(all(feature = "experimental", feature = "try-runtime"))] + +use frame_support::{ + construct_runtime, derive_impl, + migrations::VersionedRuntimeUpgrade, + parameter_types, + traits::{GetStorageVersion, OnRuntimeUpgrade, StorageVersion}, + weights::constants::RocksDbWeight, +}; +use frame_system::Config; +use sp_core::ConstU64; +use sp_runtime::BuildStorage; + +type Block = frame_system::mocking::MockBlock; + +#[frame_support::pallet] +mod dummy_pallet { + use frame_support::pallet_prelude::*; + + const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::storage] + pub type SomeStorage = StorageValue<_, u32, ValueQuery>; + + #[pallet::genesis_config] + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + #[serde(skip)] + _config: sp_std::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) {} + } +} + +impl dummy_pallet::Config for Test {} + +construct_runtime!( + pub enum Test + { + System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, + DummyPallet: dummy_pallet::{Pallet, Config, Storage} = 1, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type Block = Block; + type BlockHashCount = ConstU64<10>; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type PalletInfo = PalletInfo; + type OnSetCode = (); +} + +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext: sp_io::TestExternalities = sp_io::TestExternalities::from(storage); + 
ext.execute_with(|| System::set_block_number(1)); + ext +} + +/// A dummy migration for testing the `VersionedRuntimeUpgrade` trait. +/// Sets SomeStorage to S. +struct SomeUnversionedMigration(sp_std::marker::PhantomData); + +parameter_types! { + const UpgradeReads: u64 = 4; + const UpgradeWrites: u64 = 2; + const PreUpgradeReturnBytes: [u8; 4] = [0, 1, 2, 3]; + static PreUpgradeCalled: bool = false; + static PostUpgradeCalled: bool = false; + static PostUpgradeCalledWith: Vec = Vec::new(); +} + +/// Implement `OnRuntimeUpgrade` for `SomeUnversionedMigration`. +/// It sets SomeStorage to S, and returns a weight derived from UpgradeReads and UpgradeWrites. +impl OnRuntimeUpgrade for SomeUnversionedMigration { + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + PreUpgradeCalled::set(true); + Ok(PreUpgradeReturnBytes::get().to_vec()) + } + + fn on_runtime_upgrade() -> frame_support::weights::Weight { + dummy_pallet::SomeStorage::::put(S); + RocksDbWeight::get().reads_writes(UpgradeReads::get(), UpgradeWrites::get()) + } + + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + PostUpgradeCalled::set(true); + PostUpgradeCalledWith::set(state); + Ok(()) + } +} + +type VersionedMigrationV0ToV1 = + VersionedRuntimeUpgrade<0, 1, SomeUnversionedMigration, DummyPallet, RocksDbWeight>; + +type VersionedMigrationV1ToV2 = + VersionedRuntimeUpgrade<1, 2, SomeUnversionedMigration, DummyPallet, RocksDbWeight>; + +type VersionedMigrationV2ToV4 = + VersionedRuntimeUpgrade<2, 4, SomeUnversionedMigration, DummyPallet, RocksDbWeight>; + +#[test] +fn successful_upgrade_path() { + new_test_ext().execute_with(|| { + // on-chain storage version and value in storage start at zero + assert_eq!(DummyPallet::on_chain_storage_version(), StorageVersion::new(0)); + assert_eq!(dummy_pallet::SomeStorage::::get(), 0); + + // Execute the migration from version 0 to 1 and verify it was successful + VersionedMigrationV0ToV1::on_runtime_upgrade(); + 
assert_eq!(DummyPallet::on_chain_storage_version(), StorageVersion::new(1)); + assert_eq!(dummy_pallet::SomeStorage::::get(), 1); + + // Execute the migration from version 1 to 2 and verify it was successful + VersionedMigrationV1ToV2::on_runtime_upgrade(); + assert_eq!(DummyPallet::on_chain_storage_version(), StorageVersion::new(2)); + assert_eq!(dummy_pallet::SomeStorage::::get(), 2); + + // Execute the migration from version 2 to 4 and verify it was successful + VersionedMigrationV2ToV4::on_runtime_upgrade(); + assert_eq!(DummyPallet::on_chain_storage_version(), StorageVersion::new(4)); + assert_eq!(dummy_pallet::SomeStorage::::get(), 4); + }); +} + +#[test] +fn future_version_upgrade_is_ignored() { + new_test_ext().execute_with(|| { + // Executing V1 to V2 on V0 should be a noop + assert_eq!(DummyPallet::on_chain_storage_version(), StorageVersion::new(0)); + assert_eq!(dummy_pallet::SomeStorage::::get(), 0); + VersionedMigrationV1ToV2::on_runtime_upgrade(); + assert_eq!(DummyPallet::on_chain_storage_version(), StorageVersion::new(0)); + assert_eq!(dummy_pallet::SomeStorage::::get(), 0); + }); +} + +#[test] +fn past_version_upgrade_is_ignored() { + new_test_ext().execute_with(|| { + // Upgrade to V2 + VersionedMigrationV0ToV1::on_runtime_upgrade(); + VersionedMigrationV1ToV2::on_runtime_upgrade(); + assert_eq!(DummyPallet::on_chain_storage_version(), StorageVersion::new(2)); + assert_eq!(dummy_pallet::SomeStorage::::get(), 2); + + // Now, V0 to V1 and V1 to V2 should both be noops + dummy_pallet::SomeStorage::::put(1000); + VersionedMigrationV0ToV1::on_runtime_upgrade(); + assert_eq!(DummyPallet::on_chain_storage_version(), StorageVersion::new(2)); + assert_eq!(dummy_pallet::SomeStorage::::get(), 1000); + VersionedMigrationV1ToV2::on_runtime_upgrade(); + assert_eq!(DummyPallet::on_chain_storage_version(), StorageVersion::new(2)); + assert_eq!(dummy_pallet::SomeStorage::::get(), 1000); + }); +} + +#[test] +fn weights_are_returned_correctly() { + 
new_test_ext().execute_with(|| { + // Successful upgrade requires 1 additional read and write + let weight = VersionedMigrationV0ToV1::on_runtime_upgrade(); + assert_eq!( + weight, + RocksDbWeight::get().reads_writes(UpgradeReads::get() + 1, UpgradeWrites::get() + 1) + ); + + // Noop upgrade requires only 1 read + let weight = VersionedMigrationV0ToV1::on_runtime_upgrade(); + assert_eq!(weight, RocksDbWeight::get().reads(1)); + }); +} + +#[test] +fn pre_and_post_checks_behave_correctly() { + new_test_ext().execute_with(|| { + // Check initial state + assert_eq!(PreUpgradeCalled::get(), false); + assert_eq!(PostUpgradeCalled::get(), false); + assert_eq!(PostUpgradeCalledWith::get(), Vec::::new()); + + // Check pre/post hooks are called correctly when upgrade occurs. + VersionedMigrationV0ToV1::try_on_runtime_upgrade(true).unwrap(); + assert_eq!(PreUpgradeCalled::get(), true); + assert_eq!(PostUpgradeCalled::get(), true); + assert_eq!(PostUpgradeCalledWith::get(), PreUpgradeReturnBytes::get().to_vec()); + + // Reset hook tracking state. + PreUpgradeCalled::set(false); + PostUpgradeCalled::set(false); + + // Check pre/post hooks are not called when an upgrade is skipped. 
+ VersionedMigrationV0ToV1::try_on_runtime_upgrade(true).unwrap(); + assert_eq!(PreUpgradeCalled::get(), false); + assert_eq!(PostUpgradeCalled::get(), false); + }) +} diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index 1fa52de10e85c..d1a0124d9923f 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -14,21 +14,21 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] cfg-if = "1.0" -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", features = ["derive"], optional = true } +scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +serde = { version = "1.0.163", default-features = false, features = ["derive", "alloc"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-version = { version = "5.0.0", default-features = false, path = "../../primitives/version" } -sp-weights = { version = "4.0.0", default-features = false, path = "../../primitives/weights" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core", features = ["serde"] } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime", features 
= ["serde"] } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-version = { version = "22.0.0", default-features = false, path = "../../primitives/version", features = ["serde"] } +sp-weights = { version = "20.0.0", default-features = false, path = "../../primitives/weights", features = ["serde"] } [dev-dependencies] criterion = "0.4.0" -sp-externalities = { version = "0.13.0", path = "../../primitives/externalities" } +sp-externalities = { version = "0.19.0", path = "../../primitives/externalities" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } [features] @@ -38,19 +38,23 @@ std = [ "frame-support/std", "log/std", "scale-info/std", - "serde", + "serde/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", "sp-version/std", "sp-weights/std", + "sp-externalities/std" ] runtime-benchmarks = [ "frame-support/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = [ + "frame-support/try-runtime", + "sp-runtime/try-runtime" +] [[bench]] name = "bench" diff --git a/frame/system/README.md b/frame/system/README.md index c22b41e42d798..30b2ea73720cf 100644 --- a/frame/system/README.md +++ b/frame/system/README.md @@ -33,7 +33,7 @@ The System module defines the following extensions: - [`CheckWeight`]: Checks the weight and length of the block and ensure that it does not exceed the limits. - [`CheckNonce`]: Checks the nonce of the transaction. Contains a single payload of type - `T::Index`. + `T::Nonce`. - [`CheckEra`]: Checks the era of the transaction. Contains a single payload of type `Era`. - [`CheckGenesis`]: Checks the provided genesis hash of the transaction. Must be a part of the signed payload of the transaction. 
diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index e2fed3e51855d..da8bb10fd4e42 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -19,11 +19,9 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion}; use frame_support::traits::{ConstU32, ConstU64}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, - Perbill, + BuildStorage, Perbill, }; - #[frame_support::pallet] mod module { use frame_support::pallet_prelude::*; @@ -43,16 +41,12 @@ mod module { } } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub struct Runtime { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Module: module::{Pallet, Event}, } ); @@ -69,14 +63,13 @@ impl frame_system::Config for Runtime { type BlockLength = BlockLength; type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -95,8 +88,8 @@ impl module::Config for Runtime { } fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default() - .build_storage::() + frame_system::GenesisConfig::::default() + .build_storage() .unwrap() .into() } diff --git a/frame/system/benchmarking/Cargo.toml b/frame/system/benchmarking/Cargo.toml index 8b189848d8f91..0d8165a3a44ca 100644 --- a/frame/system/benchmarking/Cargo.toml +++ b/frame/system/benchmarking/Cargo.toml @@ -13,19 +13,19 @@ 
readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } [dev-dependencies] -sp-io = { version = "7.0.0", path = "../../../primitives/io" } -sp-externalities = { version = "0.13.0", path = "../../../primitives/externalities" } -sp-version = { version = "5.0.0", path = "../../../primitives/version" } +sp-io = { version = "23.0.0", path = "../../../primitives/io" } +sp-externalities = { version = "0.19.0", path = "../../../primitives/externalities" } +sp-version = { version = "22.0.0", path = "../../../primitives/version" } [features] default = ["std"] @@ -38,10 +38,14 @@ std = [ "sp-core/std", "sp-runtime/std", "sp-std/std", + "sp-externalities/std", + "sp-io/std", + "sp-version/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + 
"sp-runtime/runtime-benchmarks" ] diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index 1cd7b1bac6bd5..d85b631af0185 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -18,9 +18,13 @@ // Benchmarks for Utility Pallet #![cfg_attr(not(feature = "std"), no_std)] +#![cfg(feature = "runtime-benchmarks")] use codec::Encode; -use frame_benchmarking::v1::{benchmarks, whitelisted_caller}; +use frame_benchmarking::{ + v1::{benchmarks, whitelisted_caller}, + BenchmarkError, +}; use frame_support::{dispatch::DispatchClass, storage, traits::Get}; use frame_system::{Call, Pallet as System, RawOrigin}; use sp_core::storage::well_known_keys; @@ -30,7 +34,26 @@ use sp_std::{prelude::*, vec}; mod mock; pub struct Pallet(System); -pub trait Config: frame_system::Config {} +pub trait Config: frame_system::Config { + /// Adds ability to the Runtime to test against their sample code. + /// + /// Default is `../res/kitchensink_runtime.compact.compressed.wasm`. + fn prepare_set_code_data() -> Vec { + include_bytes!("../res/kitchensink_runtime.compact.compressed.wasm").to_vec() + } + + /// Adds ability to the Runtime to prepare/initialize before running benchmark `set_code`. + fn setup_set_code_requirements(_code: &Vec) -> Result<(), BenchmarkError> { + Ok(()) + } + + /// Adds ability to the Runtime to do custom validation after benchmark. + /// + /// Default is checking for `CodeUpdated` event . + fn verify_set_code() { + System::::assert_last_event(frame_system::Event::::CodeUpdated.into()); + } +} benchmarks! { remark { @@ -49,16 +72,18 @@ benchmarks! 
{ }: _(RawOrigin::Root, Default::default()) set_code { - let runtime_blob = include_bytes!("../res/kitchensink_runtime.compact.compressed.wasm").to_vec(); + let runtime_blob = T::prepare_set_code_data(); + T::setup_set_code_requirements(&runtime_blob)?; }: _(RawOrigin::Root, runtime_blob) verify { - System::::assert_last_event(frame_system::Event::::CodeUpdated.into()); + T::verify_set_code() } #[extra] set_code_without_checks { // Assume Wasm ~4MB let code = vec![1; 4_000_000 as usize]; + T::setup_set_code_requirements(&code)?; }: _(RawOrigin::Root, code) verify { let current_code = storage::unhashed::get_raw(well_known_keys::CODE).ok_or("Code not stored.")?; diff --git a/frame/system/benchmarking/src/mock.rs b/frame/system/benchmarking/src/mock.rs index 8b05c5a8ba82a..4e6b1221da356 100644 --- a/frame/system/benchmarking/src/mock.rs +++ b/frame/system/benchmarking/src/mock.rs @@ -20,22 +20,17 @@ #![cfg(test)] use codec::Encode; -use sp_runtime::traits::IdentityLookup; +use sp_runtime::{traits::IdentityLookup, BuildStorage}; type AccountId = u64; -type AccountIndex = u32; -type BlockNumber = u64; +type Nonce = u32; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, } ); @@ -45,14 +40,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = AccountIndex; - type BlockNumber = BlockNumber; + type Nonce = Nonce; type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type Header = sp_runtime::testing::Header; + type Block = Block; 
type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); @@ -81,7 +75,7 @@ impl sp_core::traits::ReadRuntimeVersion for MockedReadRuntimeVersion { } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let version = sp_version::RuntimeVersion { spec_name: "spec_name".into(), diff --git a/frame/system/rpc/runtime-api/Cargo.toml b/frame/system/rpc/runtime-api/Cargo.toml index cedb4e35be0b8..aef2cfc6e25f4 100644 --- a/frame/system/rpc/runtime-api/Cargo.toml +++ b/frame/system/rpc/runtime-api/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/api" } [features] diff --git a/frame/system/rpc/runtime-api/src/lib.rs b/frame/system/rpc/runtime-api/src/lib.rs index 2ea9f2f62e11c..f59988d818f07 100644 --- a/frame/system/rpc/runtime-api/src/lib.rs +++ b/frame/system/rpc/runtime-api/src/lib.rs @@ -24,12 +24,12 @@ #![cfg_attr(not(feature = "std"), no_std)] sp_api::decl_runtime_apis! { - /// The API to query account nonce (aka transaction index). - pub trait AccountNonceApi where + /// The API to query account nonce. + pub trait AccountNonceApi where AccountId: codec::Codec, - Index: codec::Codec, + Nonce: codec::Codec, { /// Get current account nonce of given `AccountId`. 
- fn account_nonce(account: AccountId) -> Index; + fn account_nonce(account: AccountId) -> Nonce; } } diff --git a/frame/system/src/extensions/check_genesis.rs b/frame/system/src/extensions/check_genesis.rs index 5964ec452842f..76a711a823e7d 100644 --- a/frame/system/src/extensions/check_genesis.rs +++ b/frame/system/src/extensions/check_genesis.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{Config, Pallet}; +use crate::{pallet_prelude::BlockNumberFor, Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ @@ -60,7 +60,7 @@ impl SignedExtension for CheckGenesis { const IDENTIFIER: &'static str = "CheckGenesis"; fn additional_signed(&self) -> Result { - Ok(>::block_hash(T::BlockNumber::zero())) + Ok(>::block_hash(BlockNumberFor::::zero())) } fn pre_dispatch( diff --git a/frame/system/src/extensions/check_mortality.rs b/frame/system/src/extensions/check_mortality.rs index 23c357d481350..148dfd4aad471 100644 --- a/frame/system/src/extensions/check_mortality.rs +++ b/frame/system/src/extensions/check_mortality.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::{BlockHash, Config, Pallet}; +use crate::{pallet_prelude::BlockNumberFor, BlockHash, Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ @@ -78,7 +78,7 @@ impl SignedExtension for CheckMortality { fn additional_signed(&self) -> Result { let current_u64 = >::block_number().saturated_into::(); - let n = self.0.birth(current_u64).saturated_into::(); + let n = self.0.birth(current_u64).saturated_into::>(); if !>::contains_key(n) { Err(InvalidTransaction::AncientBirthBlock.into()) } else { diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs index 57ebd7701ef6a..2939fd6534c09 100644 --- a/frame/system/src/extensions/check_nonce.rs +++ b/frame/system/src/extensions/check_nonce.rs @@ -37,11 +37,11 @@ use sp_std::vec; /// some kind of priority upon validating transactions. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] -pub struct CheckNonce(#[codec(compact)] pub T::Index); +pub struct CheckNonce(#[codec(compact)] pub T::Nonce); impl CheckNonce { /// utility constructor. Used only in client/factory code. - pub fn from(nonce: T::Index) -> Self { + pub fn from(nonce: T::Nonce) -> Self { Self(nonce) } } @@ -88,7 +88,7 @@ where } .into()) } - account.nonce += T::Index::one(); + account.nonce += T::Nonce::one(); crate::Account::::insert(who, account); Ok(()) } diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index f3d75f719d54c..c5a36c5059e95 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -50,7 +50,7 @@ //! - [`CheckWeight`]: Checks the weight and length of the block and ensure that it does not //! exceed the limits. //! - [`CheckNonce`]: Checks the nonce of the transaction. Contains a single payload of type -//! `T::Index`. +//! `T::Nonce`. //! - [`CheckEra`]: Checks the era of the transaction. Contains a single payload of type `Era`. //! 
- [`CheckGenesis`]: Checks the provided genesis hash of the transaction. Must be a part of the //! signed payload of the transaction. @@ -64,6 +64,7 @@ #![cfg_attr(not(feature = "std"), no_std)] +use pallet_prelude::{BlockNumberFor, HeaderFor}; #[cfg(feature = "std")] use serde::Serialize; use sp_io::hashing::blake2_256; @@ -72,9 +73,9 @@ use sp_runtime::traits::TrailingZeroInput; use sp_runtime::{ generic, traits::{ - self, AtLeast32Bit, AtLeast32BitUnsigned, BadOrigin, BlockNumberProvider, Bounded, - CheckEqual, Dispatchable, Hash, Lookup, LookupError, MaybeDisplay, - MaybeSerializeDeserialize, Member, One, Saturating, SimpleBitOps, StaticLookup, Zero, + self, AtLeast32Bit, BadOrigin, BlockNumberProvider, Bounded, CheckEqual, Dispatchable, + Hash, Header, Lookup, LookupError, MaybeDisplay, MaybeSerializeDeserialize, Member, One, + Saturating, SimpleBitOps, StaticLookup, Zero, }, DispatchError, RuntimeDebug, }; @@ -84,15 +85,18 @@ use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; use sp_version::RuntimeVersion; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; +#[cfg(feature = "std")] +use frame_support::traits::BuildGenesisConfig; use frame_support::{ dispatch::{ extract_actual_pays_fee, extract_actual_weight, DispatchClass, DispatchInfo, DispatchResult, DispatchResultWithPostInfo, PerDispatchClass, }, + impl_ensure_origin_with_arg_ignoring_arg, storage::{self, StorageStreamIter}, traits::{ - ConstU32, Contains, EnsureOrigin, Get, HandleLifetime, OnKilledAccount, OnNewAccount, - OriginTrait, PalletInfo, SortedMembers, StoredMap, TypedGet, + ConstU32, Contains, EnsureOrigin, EnsureOriginWithArg, Get, HandleLifetime, + OnKilledAccount, OnNewAccount, OriginTrait, PalletInfo, SortedMembers, StoredMap, TypedGet, }, Parameter, }; @@ -100,8 +104,6 @@ use scale_info::TypeInfo; use sp_core::storage::well_known_keys; use sp_weights::{RuntimeDbWeight, Weight}; -#[cfg(feature = "std")] -use frame_support::traits::GenesisBuild; #[cfg(any(feature = 
"std", test))] use sp_io::TestExternalities; @@ -201,12 +203,52 @@ pub mod pallet { use crate::{self as frame_system, pallet_prelude::*, *}; use frame_support::pallet_prelude::*; + /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. + pub mod config_preludes { + use super::DefaultConfig; + + /// Provides a viable default config that can be used with + /// [`derive_impl`](`frame_support::derive_impl`) to derive a testing pallet config + /// based on this one. + /// + /// See `Test` in the `default-config` example pallet's `test.rs` for an example of + /// a downstream user of this particular `TestDefaultConfig` + pub struct TestDefaultConfig; + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + type Nonce = u32; + type Hash = sp_core::hash::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type MaxConsumers = frame_support::traits::ConstU32<16>; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type Version = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + } + } + /// System configuration trait. Implemented by runtime. - #[pallet::config] + #[pallet::config(with_default)] #[pallet::disable_frame_system_supertrait_check] pub trait Config: 'static + Eq + Clone { + /// The aggregated event type of the runtime. + type RuntimeEvent: Parameter + + Member + + From> + + Debug + + IsType<::RuntimeEvent>; + /// The basic call filter to use in Origin. All origins are built with this filter as base, /// except Root. + #[pallet::no_default] type BaseCallFilter: Contains; /// Block & extrinsics weights: base values and limits. @@ -218,20 +260,21 @@ pub mod pallet { type BlockLength: Get; /// The `RuntimeOrigin` type used by dispatchable calls. 
+ #[pallet::no_default] type RuntimeOrigin: Into, Self::RuntimeOrigin>> + From> + Clone - + OriginTrait; + + OriginTrait; /// The aggregated `RuntimeCall` type. + #[pallet::no_default] type RuntimeCall: Parameter + Dispatchable + Debug + From>; - /// Account index (aka nonce) type. This stores the number of previous transactions - /// associated with a sender account. - type Index: Parameter + /// This stores the number of previous transactions associated with a sender account. + type Nonce: Parameter + Member + MaybeSerializeDeserialize + Debug @@ -241,21 +284,6 @@ pub mod pallet { + Copy + MaxEncodedLen; - /// The block number type used by the runtime. - type BlockNumber: Parameter - + Member - + MaybeSerializeDeserialize - + Debug - + MaybeDisplay - + AtLeast32BitUnsigned - + Default - + Bounded - + Copy - + sp_std::hash::Hash - + sp_std::str::FromStr - + MaxEncodedLen - + TypeInfo; - /// The output of the `Hashing` function. type Hash: Parameter + Member @@ -292,19 +320,15 @@ pub mod pallet { /// functional/efficient alternatives. type Lookup: StaticLookup; - /// The block header. - type Header: Parameter + traits::Header; - - /// The aggregated event type of the runtime. - type RuntimeEvent: Parameter - + Member - + From> - + Debug - + IsType<::RuntimeEvent>; + /// The Block type used by the runtime. This is used by `construct_runtime` to retrieve the + /// extrinsics or other block specific data as needed. + #[pallet::no_default] + type Block: Parameter + Member + traits::Block; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). #[pallet::constant] - type BlockHashCount: Get; + #[pallet::no_default] + type BlockHashCount: Get>; /// The weight of runtime database operations the runtime can invoke. #[pallet::constant] @@ -318,8 +342,7 @@ pub mod pallet { /// /// Expects the `PalletInfo` type that is being generated by `construct_runtime!` in the /// runtime. 
- /// - /// For tests it is okay to use `()` as type, however it will provide "useless" data. + #[pallet::no_default] type PalletInfo: PalletInfo; /// Data to be associated with an account (other than nonce/transaction counter, which this @@ -351,6 +374,7 @@ pub mod pallet { /// [`Pallet::update_code_in_storage`]). /// It's unlikely that this needs to be customized, unless you are writing a parachain using /// `Cumulus`, where the actual code change is deferred. + #[pallet::no_default] type OnSetCode: SetCode; /// The maximum number of consumers allowed on a single account. @@ -364,9 +388,7 @@ pub mod pallet { impl Hooks> for Pallet { #[cfg(feature = "std")] fn integrity_test() { - sp_io::TestExternalities::default().execute_with(|| { - T::BlockWeights::get().validate().expect("The weights are invalid."); - }); + T::BlockWeights::get().validate().expect("The weights are invalid."); } } @@ -374,11 +396,10 @@ pub mod pallet { impl Pallet { /// Make some on-chain remark. /// - /// - `O(1)` + /// Can be executed by every `origin`. #[pallet::call_index(0)] #[pallet::weight(T::SystemWeightInfo::remark(_remark.len() as u32))] - pub fn remark(origin: OriginFor, _remark: Vec) -> DispatchResultWithPostInfo { - ensure_signed_or_root(origin)?; + pub fn remark(_origin: OriginFor, _remark: Vec) -> DispatchResultWithPostInfo { Ok(().into()) } @@ -528,7 +549,7 @@ pub mod pallet { _, Blake2_128Concat, T::AccountId, - AccountInfo, + AccountInfo, ValueQuery, >; @@ -550,7 +571,7 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn block_hash)] pub type BlockHash = - StorageMap<_, Twox64Concat, T::BlockNumber, T::Hash, ValueQuery>; + StorageMap<_, Twox64Concat, BlockNumberFor, T::Hash, ValueQuery>; /// Extrinsics data for the current block (maps an extrinsic's index to its data). 
#[pallet::storage] @@ -563,7 +584,7 @@ pub mod pallet { #[pallet::storage] #[pallet::whitelist_storage] #[pallet::getter(fn block_number)] - pub(super) type Number = StorageValue<_, T::BlockNumber, ValueQuery>; + pub(super) type Number = StorageValue<_, BlockNumberFor, ValueQuery>; /// Hash of the previous block. #[pallet::storage] @@ -602,14 +623,14 @@ pub mod pallet { /// allows light-clients to leverage the changes trie storage tracking mechanism and /// in case of changes fetch the list of events of interest. /// - /// The value has the type `(T::BlockNumber, EventIndex)` because if we used only just + /// The value has the type `(BlockNumberFor, EventIndex)` because if we used only just /// the `EventIndex` then in case if the topic has the same contents on the next block /// no notification will be triggered thus the event might be lost. #[pallet::storage] #[pallet::unbounded] #[pallet::getter(fn event_topics)] pub(super) type EventTopics = - StorageMap<_, Blake2_128Concat, T::Hash, Vec<(T::BlockNumber, EventIndex)>, ValueQuery>; + StorageMap<_, Blake2_128Concat, T::Hash, Vec<(BlockNumberFor, EventIndex)>, ValueQuery>; /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened. 
#[pallet::storage] @@ -630,17 +651,19 @@ pub mod pallet { #[pallet::whitelist_storage] pub(super) type ExecutionPhase = StorageValue<_, Phase>; - #[derive(Default)] + #[derive(frame_support::DefaultNoBound)] #[pallet::genesis_config] - pub struct GenesisConfig { - #[cfg_attr(feature = "std", serde(with = "sp_core::bytes"))] + pub struct GenesisConfig { + #[serde(with = "sp_core::bytes")] pub code: Vec, + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { - >::insert::<_, T::Hash>(T::BlockNumber::zero(), hash69()); + >::insert::<_, T::Hash>(BlockNumberFor::::zero(), hash69()); >::put::(hash69()); >::put(LastRuntimeUpgradeInfo::from(T::Version::get())); >::put(true); @@ -652,26 +675,6 @@ pub mod pallet { } } -#[cfg(feature = "std")] -impl GenesisConfig { - /// Direct implementation of `GenesisBuild::build_storage`. - /// - /// Kept in order not to break dependency. - pub fn build_storage(&self) -> Result { - >::build_storage(self) - } - - /// Direct implementation of `GenesisBuild::assimilate_storage`. - /// - /// Kept in order not to break dependency. - pub fn assimilate_storage( - &self, - storage: &mut sp_runtime::Storage, - ) -> Result<(), String> { - >::assimilate_storage(self, storage) - } -} - pub type Key = Vec; pub type KeyValue = (Vec, Vec); @@ -707,7 +710,6 @@ pub struct EventRecord { // Create a Hash with 69 for each byte, // only used to build genesis config. -#[cfg(feature = "std")] fn hash69 + Default>() -> T { let mut h = T::default(); h.as_mut().iter_mut().for_each(|byte| *byte = 69); @@ -725,9 +727,9 @@ pub type RefCount = u32; /// Information of an account. #[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] -pub struct AccountInfo { +pub struct AccountInfo { /// The number of transactions this account has sent. 
- pub nonce: Index, + pub nonce: Nonce, /// The number of other modules that currently depend on this account's existence. The account /// cannot be reaped until this is zero. pub consumers: RefCount, @@ -785,6 +787,12 @@ impl, O>> + From>, Acco } } +impl_ensure_origin_with_arg_ignoring_arg! { + impl< { O: .., AccountId: Decode, T } > + EnsureOriginWithArg for EnsureRoot + {} +} + /// Ensure the origin is Root and return the provided `Success` value. pub struct EnsureRootWithSuccess( sp_std::marker::PhantomData<(AccountId, Success)>, @@ -809,6 +817,12 @@ impl< } } +impl_ensure_origin_with_arg_ignoring_arg! { + impl< { O: .., AccountId: Decode, Success: TypedGet, T } > + EnsureOriginWithArg for EnsureRootWithSuccess + {} +} + /// Ensure the origin is provided `Ensure` origin and return the provided `Success` value. pub struct EnsureWithSuccess( sp_std::marker::PhantomData<(Ensure, AccountId, Success)>, @@ -854,6 +868,12 @@ impl, O>> + From>, Acco } } +impl_ensure_origin_with_arg_ignoring_arg! { + impl< { O: .., AccountId: Decode, T } > + EnsureOriginWithArg for EnsureSigned + {} +} + /// Ensure the origin is `Signed` origin from the given `AccountId`. pub struct EnsureSignedBy(sp_std::marker::PhantomData<(Who, AccountId)>); impl< @@ -880,6 +900,12 @@ impl< } } +impl_ensure_origin_with_arg_ignoring_arg! { + impl< { O: .., Who: SortedMembers, AccountId: PartialEq + Clone + Ord + Decode, T } > + EnsureOriginWithArg for EnsureSignedBy + {} +} + /// Ensure the origin is `None`. i.e. unsigned transaction. pub struct EnsureNone(sp_std::marker::PhantomData); impl, O>> + From>, AccountId> @@ -899,10 +925,16 @@ impl, O>> + From>, Acco } } +impl_ensure_origin_with_arg_ignoring_arg! { + impl< { O: .., AccountId, T } > + EnsureOriginWithArg for EnsureNone + {} +} + /// Always fail. 
-pub struct EnsureNever(sp_std::marker::PhantomData); -impl EnsureOrigin for EnsureNever { - type Success = T; +pub struct EnsureNever(sp_std::marker::PhantomData); +impl EnsureOrigin for EnsureNever { + type Success = Success; fn try_origin(o: O) -> Result { Err(o) } @@ -913,6 +945,12 @@ impl EnsureOrigin for EnsureNever { } } +impl_ensure_origin_with_arg_ignoring_arg! { + impl< { O, Success, T } > + EnsureOriginWithArg for EnsureNever + {} +} + /// Ensure that the origin `o` represents a signed extrinsic (i.e. transaction). /// Returns `Ok` with the account that signed the extrinsic or an `Err` otherwise. pub fn ensure_signed(o: OuterOrigin) -> Result @@ -1257,7 +1295,7 @@ impl Pallet { let phase = ExecutionPhase::::get().unwrap_or_default(); let event = EventRecord { phase, event, topics: topics.to_vec() }; - // Index of the to be added event. + // Index of the event to be added. let event_idx = { let old_event_count = EventCount::::get(); let new_event_count = match old_event_count.checked_add(1) { @@ -1313,7 +1351,7 @@ impl Pallet { } /// Start the execution of a particular block. - pub fn initialize(number: &T::BlockNumber, parent_hash: &T::Hash, digest: &generic::Digest) { + pub fn initialize(number: &BlockNumberFor, parent_hash: &T::Hash, digest: &generic::Digest) { // populate environment ExecutionPhase::::put(Phase::Initialization); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); @@ -1330,7 +1368,7 @@ impl Pallet { /// Remove temporary "environment" entries in storage, compute the storage root and return the /// resulting header for this block. 
- pub fn finalize() -> T::Header { + pub fn finalize() -> HeaderFor { log::debug!( target: LOG_TARGET, "[{:?}] {} extrinsics, length: {} (normal {}%, op: {}%, mandatory {}%) / normal weight:\ @@ -1402,13 +1440,7 @@ impl Pallet { let storage_root = T::Hash::decode(&mut &sp_io::storage::root(version)[..]) .expect("Node is configured to use the same hash; qed"); - ::new( - number, - extrinsics_root, - storage_root, - parent_hash, - digest, - ) + HeaderFor::::new(number, extrinsics_root, storage_root, parent_hash, digest) } /// Deposits a log and ensures it matches the block's log data. @@ -1421,8 +1453,8 @@ impl Pallet { pub fn externalities() -> TestExternalities { TestExternalities::new(sp_core::storage::Storage { top: map![ - >::hashed_key_for(T::BlockNumber::zero()) => [69u8; 32].encode(), - >::hashed_key().to_vec() => T::BlockNumber::one().encode(), + >::hashed_key_for(BlockNumberFor::::zero()) => [69u8; 32].encode(), + >::hashed_key().to_vec() => BlockNumberFor::::one().encode(), >::hashed_key().to_vec() => [69u8; 32].encode() ], children_default: map![], @@ -1467,7 +1499,7 @@ impl Pallet { /// Set the block number to something in particular. Can be used as an alternative to /// `initialize` for tests that don't need to bother with the other environment entries. #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] - pub fn set_block_number(n: T::BlockNumber) { + pub fn set_block_number(n: BlockNumberFor) { >::put(n); } @@ -1533,13 +1565,13 @@ impl Pallet { } /// Retrieve the account transaction counter from storage. - pub fn account_nonce(who: impl EncodeLike) -> T::Index { + pub fn account_nonce(who: impl EncodeLike) -> T::Nonce { Account::::get(who).nonce } /// Increment a particular account's nonce by 1. pub fn inc_account_nonce(who: impl EncodeLike) { - Account::::mutate(who, |a| a.nonce += T::Index::one()); + Account::::mutate(who, |a| a.nonce += T::Nonce::one()); } /// Note what the extrinsic data of the current extrinsic index is. 
@@ -1595,7 +1627,7 @@ impl Pallet { } /// An account is being created. - pub fn on_created_account(who: T::AccountId, _a: &mut AccountInfo) { + pub fn on_created_account(who: T::AccountId, _a: &mut AccountInfo) { T::OnNewAccount::on_new_account(&who); Self::deposit_event(Event::NewAccount { account: who }); } @@ -1685,7 +1717,7 @@ impl HandleLifetime for Consumer { } impl BlockNumberProvider for Pallet { - type BlockNumber = ::BlockNumber; + type BlockNumber = BlockNumberFor; fn current_block_number() -> Self::BlockNumber { Pallet::::block_number() @@ -1756,6 +1788,10 @@ pub mod pallet_prelude { /// Type alias for the `Origin` associated type of system config. pub type OriginFor = ::RuntimeOrigin; + /// Type alias for the `Header`. + pub type HeaderFor = + <::Block as sp_runtime::traits::HeaderProvider>::HeaderT; + /// Type alias for the `BlockNumber` associated type of system config. - pub type BlockNumberFor = ::BlockNumber; + pub type BlockNumberFor = as sp_runtime::traits::Header>::Number; } diff --git a/frame/system/src/migrations/mod.rs b/frame/system/src/migrations/mod.rs index f8ebfab33b891..6f873061dbab9 100644 --- a/frame/system/src/migrations/mod.rs +++ b/frame/system/src/migrations/mod.rs @@ -31,8 +31,8 @@ type RefCount = u32; /// Information of an account. 
#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] -struct AccountInfo { - nonce: Index, +struct AccountInfo { + nonce: Nonce, consumers: RefCount, providers: RefCount, sufficients: RefCount, @@ -47,8 +47,8 @@ pub trait V2ToV3 { /// System config account id type AccountId: 'static + FullCodec; - /// System config index - type Index: 'static + FullCodec + Copy; + /// System config nonce + type Nonce: 'static + FullCodec + Copy; /// System config account data type AccountData: 'static + FullCodec; @@ -65,13 +65,13 @@ type Account = StorageMap< Pallet, Blake2_128Concat, ::AccountId, - AccountInfo<::Index, ::AccountData>, + AccountInfo<::Nonce, ::AccountData>, >; /// Migrate from unique `u8` reference counting to triple `u32` reference counting. pub fn migrate_from_single_u8_to_triple_ref_count() -> Weight { let mut translated: usize = 0; - >::translate::<(V::Index, u8, V::AccountData), _>(|_key, (nonce, rc, data)| { + >::translate::<(V::Nonce, u8, V::AccountData), _>(|_key, (nonce, rc, data)| { translated += 1; Some(AccountInfo { nonce, consumers: rc as RefCount, providers: 1, sufficients: 0, data }) }); @@ -88,7 +88,7 @@ pub fn migrate_from_single_u8_to_triple_ref_count() -> Wei /// Migrate from unique `u32` reference counting to triple `u32` reference counting. pub fn migrate_from_single_to_triple_ref_count() -> Weight { let mut translated: usize = 0; - >::translate::<(V::Index, RefCount, V::AccountData), _>( + >::translate::<(V::Nonce, RefCount, V::AccountData), _>( |_key, (nonce, consumers, data)| { translated += 1; Some(AccountInfo { nonce, consumers, providers: 1, sufficients: 0, data }) @@ -106,7 +106,7 @@ pub fn migrate_from_single_to_triple_ref_count() -> Weight /// Migrate from dual `u32` reference counting to triple `u32` reference counting. 
pub fn migrate_from_dual_to_triple_ref_count() -> Weight { let mut translated: usize = 0; - >::translate::<(V::Index, RefCount, RefCount, V::AccountData), _>( + >::translate::<(V::Nonce, RefCount, RefCount, V::AccountData), _>( |_key, (nonce, consumers, providers, data)| { translated += 1; Some(AccountInfo { nonce, consumers, providers, sufficients: 0, data }) diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index 83e12dccaa165..c016ea9e1cd14 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -22,21 +22,16 @@ use frame_support::{ }; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, BuildStorage, Perbill, }; -type UncheckedExtrinsic = mocking::MockUncheckedExtrinsic; type Block = mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, } ); @@ -96,13 +91,12 @@ impl Config for Test { type BlockLength = RuntimeBlockLength; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<10>; type DbWeight = DbWeight; @@ -126,7 +120,7 @@ pub const CALL: &::RuntimeCall = /// Create new externalities for `System` module tests. 
pub fn new_test_ext() -> sp_io::TestExternalities { let mut ext: sp_io::TestExternalities = - GenesisConfig::default().build_storage().unwrap().into(); + RuntimeGenesisConfig::default().build_storage().unwrap().into(); // Add to each test the initial weight of a block ext.execute_with(|| { System::register_extra_weight_unchecked( diff --git a/frame/system/src/mocking.rs b/frame/system/src/mocking.rs index 8f76c1b8e08ba..833309e05ecc9 100644 --- a/frame/system/src/mocking.rs +++ b/frame/system/src/mocking.rs @@ -29,6 +29,19 @@ pub type MockUncheckedExtrinsic = generic::Unchec /// An implementation of `sp_runtime::traits::Block` to be used in tests. pub type MockBlock = generic::Block< - generic::Header<::BlockNumber, sp_runtime::traits::BlakeTwo256>, + generic::Header, + MockUncheckedExtrinsic, +>; + +/// An implementation of `sp_runtime::traits::Block` to be used in tests with u32 BlockNumber type. +pub type MockBlockU32 = generic::Block< + generic::Header, + MockUncheckedExtrinsic, +>; + +/// An implementation of `sp_runtime::traits::Block` to be used in tests with u128 BlockNumber +/// type. 
+pub type MockBlockU128 = generic::Block< + generic::Header, MockUncheckedExtrinsic, >; diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index 742146d1642c8..313a557c44d76 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -486,7 +486,7 @@ pub trait CreateSignedTransaction: call: Self::OverarchingCall, public: Self::Public, account: Self::AccountId, - nonce: Self::Index, + nonce: Self::Nonce, ) -> Option<(Self::OverarchingCall, ::SignaturePayload)>; } diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index 05a7e96fdecb1..165df688b1c2c 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -746,19 +746,20 @@ fn ensure_signed_stuff_works() { } let signed_origin = RuntimeOrigin::signed(0u64); - assert_ok!(EnsureSigned::try_origin(signed_origin.clone())); - assert_ok!(EnsureSignedBy::::try_origin(signed_origin)); + assert_ok!( as EnsureOrigin<_>>::try_origin(signed_origin.clone())); + assert_ok!( as EnsureOrigin<_>>::try_origin(signed_origin)); #[cfg(feature = "runtime-benchmarks")] { - let successful_origin: RuntimeOrigin = EnsureSigned::try_successful_origin() - .expect("EnsureSigned has no successful origin required for the test"); - assert_ok!(EnsureSigned::try_origin(successful_origin)); + let successful_origin: RuntimeOrigin = + as EnsureOrigin<_>>::try_successful_origin() + .expect("EnsureSigned has no successful origin required for the test"); + assert_ok!( as EnsureOrigin<_>>::try_origin(successful_origin)); let successful_origin: RuntimeOrigin = - EnsureSignedBy::::try_successful_origin() + as EnsureOrigin<_>>::try_successful_origin() .expect("EnsureSignedBy has no successful origin required for the test"); - assert_ok!(EnsureSignedBy::::try_origin(successful_origin)); + assert_ok!( as EnsureOrigin<_>>::try_origin(successful_origin)); } } diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs index 64dce5356f196..b79db3654b9f7 100644 --- 
a/frame/system/src/weights.rs +++ b/frame/system/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for frame_system //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-s7kdgajz-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,8 +33,8 @@ // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=frame_system +// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json +// --pallet=frame-system // --chain=dev // --header=./HEADER-APACHE2 // --output=./frame/system/src/weights.rs @@ -43,6 +43,7 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; @@ -66,20 +67,20 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_344_000 picoseconds. - Weight::from_parts(2_471_000, 0) + // Minimum execution time: 2_004_000 picoseconds. + Weight::from_parts(2_119_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(364, 0).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(390, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 3932160]`. 
fn remark_with_event(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_815_000 picoseconds. - Weight::from_parts(9_140_000, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_122, 0).saturating_mul(b.into())) + // Minimum execution time: 8_032_000 picoseconds. + Weight::from_parts(8_097_000, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_455, 0).saturating_mul(b.into())) } /// Storage: System Digest (r:1 w:1) /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) @@ -89,8 +90,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 5_233_000 picoseconds. - Weight::from_parts(5_462_000, 1485) + // Minimum execution time: 4_446_000 picoseconds. + Weight::from_parts(4_782_000, 1485) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -102,8 +103,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 58_606_683_000 picoseconds. - Weight::from_parts(59_115_121_000, 1485) + // Minimum execution time: 84_000_503_000 picoseconds. + Weight::from_parts(87_586_619_000, 1485) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -114,10 +115,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_317_000 picoseconds. - Weight::from_parts(2_457_000, 0) - // Standard Error: 894 - .saturating_add(Weight::from_parts(750_850, 0).saturating_mul(i.into())) + // Minimum execution time: 2_086_000 picoseconds. 
+ Weight::from_parts(2_175_000, 0) + // Standard Error: 1_056 + .saturating_add(Weight::from_parts(841_511, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: Skipped Metadata (r:0 w:0) @@ -127,10 +128,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_498_000 picoseconds. - Weight::from_parts(2_552_000, 0) - // Standard Error: 1_027 - .saturating_add(Weight::from_parts(566_064, 0).saturating_mul(i.into())) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_255_000, 0) + // Standard Error: 1_425 + .saturating_add(Weight::from_parts(662_473, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: Skipped Metadata (r:0 w:0) @@ -138,12 +139,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `116 + p * (69 ±0)` - // Estimated: `121 + p * (70 ±0)` - // Minimum execution time: 4_646_000 picoseconds. - Weight::from_parts(4_725_000, 121) - // Standard Error: 1_195 - .saturating_add(Weight::from_parts(1_144_884, 0).saturating_mul(p.into())) + // Measured: `115 + p * (69 ±0)` + // Estimated: `128 + p * (70 ±0)` + // Minimum execution time: 4_189_000 picoseconds. + Weight::from_parts(4_270_000, 128) + // Standard Error: 2_296 + .saturating_add(Weight::from_parts(1_389_650, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) @@ -157,20 +158,20 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_344_000 picoseconds. 
- Weight::from_parts(2_471_000, 0) + // Minimum execution time: 2_004_000 picoseconds. + Weight::from_parts(2_119_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(364, 0).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(390, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_815_000 picoseconds. - Weight::from_parts(9_140_000, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_122, 0).saturating_mul(b.into())) + // Minimum execution time: 8_032_000 picoseconds. + Weight::from_parts(8_097_000, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_455, 0).saturating_mul(b.into())) } /// Storage: System Digest (r:1 w:1) /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) @@ -180,8 +181,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 5_233_000 picoseconds. - Weight::from_parts(5_462_000, 1485) + // Minimum execution time: 4_446_000 picoseconds. + Weight::from_parts(4_782_000, 1485) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -193,8 +194,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 58_606_683_000 picoseconds. - Weight::from_parts(59_115_121_000, 1485) + // Minimum execution time: 84_000_503_000 picoseconds. + Weight::from_parts(87_586_619_000, 1485) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -205,10 +206,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_317_000 picoseconds. 
- Weight::from_parts(2_457_000, 0) - // Standard Error: 894 - .saturating_add(Weight::from_parts(750_850, 0).saturating_mul(i.into())) + // Minimum execution time: 2_086_000 picoseconds. + Weight::from_parts(2_175_000, 0) + // Standard Error: 1_056 + .saturating_add(Weight::from_parts(841_511, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: Skipped Metadata (r:0 w:0) @@ -218,10 +219,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_498_000 picoseconds. - Weight::from_parts(2_552_000, 0) - // Standard Error: 1_027 - .saturating_add(Weight::from_parts(566_064, 0).saturating_mul(i.into())) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_255_000, 0) + // Standard Error: 1_425 + .saturating_add(Weight::from_parts(662_473, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: Skipped Metadata (r:0 w:0) @@ -229,12 +230,12 @@ impl WeightInfo for () { /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `116 + p * (69 ±0)` - // Estimated: `121 + p * (70 ±0)` - // Minimum execution time: 4_646_000 picoseconds. - Weight::from_parts(4_725_000, 121) - // Standard Error: 1_195 - .saturating_add(Weight::from_parts(1_144_884, 0).saturating_mul(p.into())) + // Measured: `115 + p * (69 ±0)` + // Estimated: `128 + p * (70 ±0)` + // Minimum execution time: 4_189_000 picoseconds. 
+ Weight::from_parts(4_270_000, 128) + // Standard Error: 2_296 + .saturating_add(Weight::from_parts(1_389_650, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index 0f28955259cce..c92fb6b75b1b9 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -14,21 +14,21 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } -sp-io = { version = "7.0.0", default-features = false, optional = true, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, optional = true, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = 
"../../primitives/std" } sp-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../primitives/timestamp" } [dev-dependencies] -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } [features] default = ["std"] @@ -44,6 +44,17 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-timestamp/std", + "sp-core/std" +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "sp-io", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime" ] -runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks", "sp-io"] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 192c81502bf6e..4eb95941d7828 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -130,7 +130,7 @@ pub mod pallet { type Moment: Parameter + Default + AtLeast32Bit - + Scale + + Scale, Output = Self::Moment> + Copy + MaxEncodedLen + scale_info::StaticTypeInfo; @@ -151,7 +151,7 @@ pub mod pallet { } #[pallet::pallet] - pub struct Pallet(PhantomData); + pub struct Pallet(_); /// Current time for the current block. 
#[pallet::storage] diff --git a/frame/timestamp/src/mock.rs b/frame/timestamp/src/mock.rs index 6f681788236c3..418d257b3f005 100644 --- a/frame/timestamp/src/mock.rs +++ b/frame/timestamp/src/mock.rs @@ -27,21 +27,17 @@ use frame_support::{ use sp_core::H256; use sp_io::TestExternalities; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; type Moment = u64; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, } ); @@ -52,14 +48,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -100,7 +95,7 @@ pub(crate) fn get_captured_moment() -> Option { } pub(crate) fn new_test_ext() -> TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); clear_captured_moment(); TestExternalities::new(t) } diff --git a/frame/timestamp/src/weights.rs b/frame/timestamp/src/weights.rs index 1c254bf220076..46c5447348694 100644 --- a/frame/timestamp/src/weights.rs +++ b/frame/timestamp/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_timestamp //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_timestamp +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_timestamp. pub trait WeightInfo { @@ -63,8 +67,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `312` // Estimated: `1493` - // Minimum execution time: 10_834_000 picoseconds. - Weight::from_parts(11_099_000, 1493) + // Minimum execution time: 9_857_000 picoseconds. + Weight::from_parts(10_492_000, 1493) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -72,8 +76,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `161` // Estimated: `0` - // Minimum execution time: 4_472_000 picoseconds. - Weight::from_parts(4_645_000, 0) + // Minimum execution time: 4_175_000 picoseconds. + Weight::from_parts(4_334_000, 0) } } @@ -87,8 +91,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `312` // Estimated: `1493` - // Minimum execution time: 10_834_000 picoseconds. 
- Weight::from_parts(11_099_000, 1493) + // Minimum execution time: 9_857_000 picoseconds. + Weight::from_parts(10_492_000, 1493) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -96,7 +100,7 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `161` // Estimated: `0` - // Minimum execution time: 4_472_000 picoseconds. - Weight::from_parts(4_645_000, 0) + // Minimum execution time: 4_175_000 picoseconds. + Weight::from_parts(4_334_000, 0) } } diff --git a/frame/tips/Cargo.toml b/frame/tips/Cargo.toml index caed4edec5f48..aed4f6f96176d 100644 --- a/frame/tips/Cargo.toml +++ b/frame/tips/Cargo.toml @@ -13,22 +13,22 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", features = ["derive"], optional = true } +serde = { version = "1.0.163", features = ["derive"], optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-treasury = { version = "4.0.0-dev", default-features = false, path = "../treasury" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = 
false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-storage = { version = "7.0.0", path = "../../primitives/storage" } +sp-storage = { version = "13.0.0", path = "../../primitives/storage" } [features] default = ["std"] @@ -45,10 +45,21 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "sp-storage/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-treasury/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-treasury/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs index 613f684afdf56..4a991b11b9331 100644 --- a/frame/tips/src/benchmarking.rs +++ b/frame/tips/src/benchmarking.rs @@ -78,7 +78,7 @@ fn create_tips, I: 'static>( } Tips::::mutate(hash, |maybe_tip| { if let Some(open_tip) = maybe_tip { - open_tip.closes = Some(T::BlockNumber::zero()); + open_tip.closes = Some(frame_system::pallet_prelude::BlockNumberFor::::zero()); } }); Ok(()) diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs index 970e2ac152c4b..6e8f72e0540e6 100644 --- a/frame/tips/src/lib.rs +++ b/frame/tips/src/lib.rs @@ -74,6 +74,7 @@ use frame_support::{ }, Parameter, }; +use frame_system::pallet_prelude::BlockNumberFor; pub use pallet::*; pub 
use weights::WeightInfo; @@ -143,7 +144,7 @@ pub mod pallet { /// The period for which a tip remains open after is has achieved threshold tippers. #[pallet::constant] - type TipCountdown: Get; + type TipCountdown: Get>; /// The percent of the final tip which goes to the original reporter of the tip. #[pallet::constant] @@ -173,7 +174,7 @@ pub mod pallet { _, Twox64Concat, T::Hash, - OpenTip, T::BlockNumber, T::Hash>, + OpenTip, BlockNumberFor, T::Hash>, OptionQuery, >; @@ -470,7 +471,7 @@ impl, I: 'static> Pallet { /// /// `O(T)` and one storage access. fn insert_tip_and_check_closing( - tip: &mut OpenTip, T::BlockNumber, T::Hash>, + tip: &mut OpenTip, BlockNumberFor, T::Hash>, tipper: T::AccountId, tip_value: BalanceOf, ) -> bool { @@ -515,7 +516,7 @@ impl, I: 'static> Pallet { /// Plus `O(T)` (`T` is Tippers length). fn payout_tip( hash: T::Hash, - tip: OpenTip, T::BlockNumber, T::Hash>, + tip: OpenTip, BlockNumberFor, T::Hash>, ) { let mut tips = tip.tips; Self::retain_active_tips(&mut tips); @@ -577,7 +578,7 @@ impl, I: 'static> Pallet { for (hash, old_tip) in storage_key_iter::< T::Hash, - OldOpenTip, T::BlockNumber, T::Hash>, + OldOpenTip, BlockNumberFor, T::Hash>, Twox64Concat, >(module, item) .drain() diff --git a/frame/tips/src/migrations/mod.rs b/frame/tips/src/migrations/mod.rs index f7f144adcdb6e..9cdd01c17fbf6 100644 --- a/frame/tips/src/migrations/mod.rs +++ b/frame/tips/src/migrations/mod.rs @@ -21,3 +21,6 @@ /// before calling this migration. After calling this migration, it will get replaced with /// own storage identifier. pub mod v4; + +/// A migration that unreserves all funds held in the context of this pallet. +pub mod unreserve_deposits; diff --git a/frame/tips/src/migrations/unreserve_deposits.rs b/frame/tips/src/migrations/unreserve_deposits.rs new file mode 100644 index 0000000000000..16cb1a80e812b --- /dev/null +++ b/frame/tips/src/migrations/unreserve_deposits.rs @@ -0,0 +1,324 @@ +// This file is part of Substrate. 
+ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A migration that unreserves all deposit and unlocks all stake held in the context of this +//! pallet. + +use core::iter::Sum; +use frame_support::{ + pallet_prelude::OptionQuery, + storage_alias, + traits::{Currency, LockableCurrency, OnRuntimeUpgrade, ReservableCurrency}, + weights::RuntimeDbWeight, + Parameter, Twox64Concat, +}; +use sp_runtime::{traits::Zero, Saturating}; +use sp_std::collections::btree_map::BTreeMap; + +#[cfg(feature = "try-runtime")] +const LOG_TARGET: &str = "runtime::tips::migrations::unreserve_deposits"; + +type BalanceOf = + <>::Currency as Currency<>::AccountId>>::Balance; + +/// The configuration for [`UnreserveDeposits`]. +pub trait UnlockConfig: 'static { + /// The hash used in the runtime. + type Hash: Parameter; + /// The account ID used in the runtime. + type AccountId: Parameter + Ord; + /// The currency type used in the runtime. + /// + /// Should match the currency type previously used for the pallet, if applicable. + type Currency: LockableCurrency + ReservableCurrency; + /// Base deposit to report a tip. + /// + /// Should match the currency type previously used for the pallet, if applicable. + type TipReportDepositBase: sp_core::Get>; + /// Deposit per byte to report a tip. + /// + /// Should match the currency type previously used for the pallet, if applicable. 
+ type DataDepositPerByte: sp_core::Get>; + /// The name of the pallet as previously configured in + /// [`construct_runtime!`](frame_support::construct_runtime). + type PalletName: sp_core::Get<&'static str>; + /// The DB weight as configured in the runtime to calculate the correct weight. + type DbWeight: sp_core::Get; + /// The block number as configured in the runtime. + type BlockNumber: Parameter + Zero + Copy + Ord; +} + +/// An open tipping "motion". Retains all details of a tip including information on the finder +/// and the members who have voted. +#[storage_alias(dynamic)] +type Tips, I: 'static> = StorageMap< + >::PalletName, + Twox64Concat, + >::Hash, + crate::OpenTip< + >::AccountId, + BalanceOf, + >::BlockNumber, + >::Hash, + >, + OptionQuery, +>; + +/// A migration that unreserves all tip deposits. +/// +/// Useful to prevent funds from being locked up when the pallet is deprecated. +/// +/// The pallet should be made inoperable before or immediately after this migration is run. +/// +/// (See also the `RemovePallet` migration in `frame/support/src/migrations.rs`) +pub struct UnreserveDeposits, I: 'static>(sp_std::marker::PhantomData<(T, I)>); + +impl, I: 'static> UnreserveDeposits { + /// Calculates and returns the total amount reserved by each account by this pallet from open + /// tips. + /// + /// # Returns + /// + /// * `BTreeMap`: Map of account IDs to their respective total + /// reserved balance by this pallet + /// * `frame_support::weights::Weight`: The weight of this operation. 
+ fn get_deposits() -> (BTreeMap>, frame_support::weights::Weight) { + use sp_core::Get; + + let mut tips_len = 0; + let account_deposits: BTreeMap> = Tips::::iter() + .map(|(_hash, open_tip)| open_tip) + .fold(BTreeMap::new(), |mut acc, tip| { + // Count the total number of tips + tips_len.saturating_inc(); + + // Add the balance to the account's existing deposit in the accumulator + acc.entry(tip.finder).or_insert(Zero::zero()).saturating_accrue(tip.deposit); + acc + }); + + (account_deposits, T::DbWeight::get().reads(tips_len)) + } +} + +impl, I: 'static> OnRuntimeUpgrade for UnreserveDeposits +where + BalanceOf: Sum, +{ + /// Gets the actual reserved amount for each account before the migration, performs integrity + /// checks and prints some summary information. + /// + /// Steps: + /// 1. Gets the deposited balances for each account stored in this pallet. + /// 2. Collects actual pre-migration reserved balances for each account. + /// 3. Checks the integrity of the deposited balances. + /// 4. Prints summary statistics about the state to be migrated. + /// 5. Returns the pre-migration actual reserved balance for each account that will + /// be part of the migration. + /// + /// Fails with a `TryRuntimeError` if somehow the amount reserved by this pallet is greater than + /// the actual total reserved amount for any accounts. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use codec::Encode; + use frame_support::ensure; + + // Get the Tips pallet view of balances it has reserved + let (account_deposits, _) = Self::get_deposits(); + + // Get the actual amounts reserved for accounts with open tips + let account_reserved_before: BTreeMap> = account_deposits + .keys() + .map(|account| (account.clone(), T::Currency::reserved_balance(&account))) + .collect(); + + // The deposit amount must be less than or equal to the reserved amount. 
+ // If it is higher, there is either a bug with the pallet or a bug in the calculation of the + // deposit amount. + ensure!( + account_deposits.iter().all(|(account, deposit)| *deposit <= + *account_reserved_before.get(account).unwrap_or(&Zero::zero())), + "Deposit amount is greater than reserved amount" + ); + + // Print some summary stats + let total_deposits_to_unreserve = + account_deposits.clone().into_values().sum::>(); + log::info!(target: LOG_TARGET, "Total accounts: {}", account_deposits.keys().count()); + log::info!(target: LOG_TARGET, "Total amount to unreserve: {:?}", total_deposits_to_unreserve); + + // Return the actual amount reserved before the upgrade to verify integrity of the upgrade + // in the post_upgrade hook. + Ok(account_reserved_before.encode()) + } + + /// Executes the migration, unreserving funds that are locked in Tip deposits. + fn on_runtime_upgrade() -> frame_support::weights::Weight { + use frame_support::traits::Get; + + // Get staked and deposited balances as reported by this pallet. + let (account_deposits, initial_reads) = Self::get_deposits(); + + // Deposited funds need to be unreserved. + for (account, unreserve_amount) in account_deposits.iter() { + if unreserve_amount.is_zero() { + continue + } + T::Currency::unreserve(&account, *unreserve_amount); + } + + T::DbWeight::get() + .reads_writes(account_deposits.len() as u64, account_deposits.len() as u64) + .saturating_add(initial_reads) + } + + /// Verifies that the account reserved balances were reduced by the actual expected amounts. + #[cfg(feature = "try-runtime")] + fn post_upgrade( + account_reserved_before_bytes: sp_std::vec::Vec, + ) -> Result<(), sp_runtime::TryRuntimeError> { + use codec::Decode; + + let account_reserved_before = BTreeMap::>::decode( + &mut &account_reserved_before_bytes[..], + ) + .map_err(|_| "Failed to decode account_reserved_before_bytes")?; + + // Get deposited balances as reported by this pallet. 
+ let (account_deposits, _) = Self::get_deposits(); + + // Check that the reserved balance is reduced by the expected deposited amount. + for (account, actual_reserved_before) in account_reserved_before { + let actual_reserved_after = T::Currency::reserved_balance(&account); + let expected_amount_deducted = *account_deposits + .get(&account) + .expect("account deposit must exist to be in account_reserved_before, qed"); + let expected_reserved_after = + actual_reserved_before.saturating_sub(expected_amount_deducted); + + if actual_reserved_after != expected_reserved_after { + log::error!( + target: LOG_TARGET, + "Reserved balance for {:?} is incorrect. actual before: {:?}, actual after, {:?}, expected deducted: {:?}", + account, + actual_reserved_before, + actual_reserved_after, + expected_amount_deducted + ); + return Err("Reserved balance is incorrect".into()) + } + } + + Ok(()) + } +} + +#[cfg(all(feature = "try-runtime", test))] +mod test { + use super::*; + use crate::{ + migrations::unreserve_deposits::UnreserveDeposits, + tests::{new_test_ext, Balances, RuntimeOrigin, Test, Tips}, + }; + use frame_support::{assert_ok, parameter_types, traits::TypedGet}; + use frame_system::pallet_prelude::BlockNumberFor; + use sp_core::ConstU64; + + parameter_types! 
{ + const PalletName: &'static str = "Tips"; + } + + struct UnlockConfigImpl; + impl super::UnlockConfig<()> for UnlockConfigImpl { + type Currency = Balances; + type TipReportDepositBase = ConstU64<1>; + type DataDepositPerByte = ConstU64<1>; + type Hash = sp_core::H256; + type AccountId = u128; + type BlockNumber = BlockNumberFor; + type DbWeight = (); + type PalletName = PalletName; + } + + #[test] + fn unreserve_all_funds_works() { + let tipper_0 = 0; + let tipper_1 = 1; + let tipper_0_initial_reserved = 0; + let tipper_1_initial_reserved = 5; + let recipient = 100; + let tip_0_reason = b"what_is_really_not_awesome".to_vec(); + let tip_1_reason = b"pineapple_on_pizza".to_vec(); + new_test_ext().execute_with(|| { + // Set up + assert_ok!(::Currency::reserve( + &tipper_0, + tipper_0_initial_reserved + )); + assert_ok!(::Currency::reserve( + &tipper_1, + tipper_1_initial_reserved + )); + + // Make some tips + assert_ok!(Tips::report_awesome( + RuntimeOrigin::signed(tipper_0), + tip_0_reason.clone(), + recipient + )); + assert_ok!(Tips::report_awesome( + RuntimeOrigin::signed(tipper_1), + tip_1_reason.clone(), + recipient + )); + + // Verify the expected amount is reserved + assert_eq!( + ::Currency::reserved_balance(&tipper_0), + tipper_0_initial_reserved + + ::TipReportDepositBase::get() + + ::DataDepositPerByte::get() * + tip_0_reason.len() as u64 + ); + assert_eq!( + ::Currency::reserved_balance(&tipper_1), + tipper_1_initial_reserved + + ::TipReportDepositBase::get() + + ::DataDepositPerByte::get() * + tip_1_reason.len() as u64 + ); + + // Execute the migration + let bytes = match UnreserveDeposits::::pre_upgrade() { + Ok(bytes) => bytes, + Err(e) => panic!("pre_upgrade failed: {:?}", e), + }; + UnreserveDeposits::::on_runtime_upgrade(); + assert_ok!(UnreserveDeposits::::post_upgrade(bytes)); + + // Check the deposits were unreserved + assert_eq!( + ::Currency::reserved_balance(&tipper_0), + tipper_0_initial_reserved + ); + assert_eq!( +
::Currency::reserved_balance(&tipper_1), + tipper_1_initial_reserved + ); + }); + } +} diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs index b2d97de18312f..a700892d42703 100644 --- a/frame/tips/src/tests.rs +++ b/frame/tips/src/tests.rs @@ -21,16 +21,13 @@ use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BadOrigin, BlakeTwo256, IdentityLookup}, BuildStorage, Perbill, Permill, }; use sp_storage::Storage; use frame_support::{ - assert_noop, assert_ok, - pallet_prelude::GenesisBuild, - parameter_types, + assert_noop, assert_ok, parameter_types, storage::StoragePrefixedMap, traits::{ConstU32, ConstU64, SortedMembers, StorageVersion}, PalletId, @@ -39,19 +36,15 @@ use frame_support::{ use super::*; use crate::{self as pallet_tips, Event as TipEvent}; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event}, - Treasury1: pallet_treasury::::{Pallet, Call, Storage, Config, Event}, + Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event}, + Treasury1: pallet_treasury::::{Pallet, Call, Storage, Config, Event}, Tips: pallet_tips::{Pallet, Call, Storage, Event}, Tips1: pallet_tips::::{Pallet, Call, Storage, Event}, } @@ -66,14 +59,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u128; // u64 is not enough to hold bytes used to 
generate bounty account type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -99,7 +91,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } parameter_types! { @@ -196,7 +188,7 @@ impl Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = GenesisConfig { + let mut ext: sp_io::TestExternalities = RuntimeGenesisConfig { system: frame_system::GenesisConfig::default(), balances: pallet_balances::GenesisConfig { balances: vec![(0, 100), (1, 98), (2, 1)] }, treasury: Default::default(), @@ -570,7 +562,7 @@ fn test_migration_v4() { #[test] fn genesis_funding_works() { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let initial_funding = 100; pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized with 100. @@ -578,7 +570,9 @@ fn genesis_funding_works() { } .assimilate_storage(&mut t) .unwrap(); - GenesisBuild::::assimilate_storage(&pallet_treasury::GenesisConfig, &mut t).unwrap(); + pallet_treasury::GenesisConfig::::default() + .assimilate_storage(&mut t) + .unwrap(); let mut t: sp_io::TestExternalities = t.into(); t.execute_with(|| { diff --git a/frame/tips/src/weights.rs b/frame/tips/src/weights.rs index ec5eef8c8bd81..ec6228667159d 100644 --- a/frame/tips/src/weights.rs +++ b/frame/tips/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_tips //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_tips +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_tips. pub trait WeightInfo { @@ -68,10 +72,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3469` - // Minimum execution time: 30_728_000 picoseconds. - Weight::from_parts(31_794_924, 3469) - // Standard Error: 171 - .saturating_add(Weight::from_parts(1_020, 0).saturating_mul(r.into())) + // Minimum execution time: 29_576_000 picoseconds. + Weight::from_parts(30_722_650, 3469) + // Standard Error: 192 + .saturating_add(Weight::from_parts(2_601, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -83,8 +87,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `221` // Estimated: `3686` - // Minimum execution time: 29_183_000 picoseconds. - Weight::from_parts(30_017_000, 3686) + // Minimum execution time: 28_522_000 picoseconds. 
+ Weight::from_parts(29_323_000, 3686) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -100,12 +104,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `526 + t * (64 ±0)` // Estimated: `3991 + t * (64 ±0)` - // Minimum execution time: 20_726_000 picoseconds. - Weight::from_parts(20_964_411, 3991) - // Standard Error: 119 - .saturating_add(Weight::from_parts(1_230, 0).saturating_mul(r.into())) - // Standard Error: 2_837 - .saturating_add(Weight::from_parts(81_831, 0).saturating_mul(t.into())) + // Minimum execution time: 19_650_000 picoseconds. + Weight::from_parts(19_837_982, 3991) + // Standard Error: 151 + .saturating_add(Weight::from_parts(1_746, 0).saturating_mul(r.into())) + // Standard Error: 3_588 + .saturating_add(Weight::from_parts(102_359, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) @@ -119,10 +123,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `747 + t * (112 ±0)` // Estimated: `4212 + t * (112 ±0)` - // Minimum execution time: 16_048_000 picoseconds. - Weight::from_parts(16_694_981, 4212) - // Standard Error: 4_480 - .saturating_add(Weight::from_parts(179_723, 0).saturating_mul(t.into())) + // Minimum execution time: 15_641_000 picoseconds. + Weight::from_parts(15_745_460, 4212) + // Standard Error: 5_106 + .saturating_add(Weight::from_parts(229_475, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) @@ -140,10 +144,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `786 + t * (112 ±0)` // Estimated: `4242 + t * (112 ±0)` - // Minimum execution time: 61_319_000 picoseconds. 
- Weight::from_parts(62_217_195, 4242) - // Standard Error: 6_721 - .saturating_add(Weight::from_parts(186_620, 0).saturating_mul(t.into())) + // Minimum execution time: 62_059_000 picoseconds. + Weight::from_parts(64_604_554, 4242) + // Standard Error: 11_818 + .saturating_add(Weight::from_parts(116_297, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) @@ -157,10 +161,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `269` // Estimated: `3734` - // Minimum execution time: 15_397_000 picoseconds. - Weight::from_parts(15_942_494, 3734) - // Standard Error: 1_657 - .saturating_add(Weight::from_parts(4_128, 0).saturating_mul(t.into())) + // Minimum execution time: 14_133_000 picoseconds. + Weight::from_parts(14_957_547, 3734) + // Standard Error: 2_765 + .saturating_add(Weight::from_parts(22_138, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -177,10 +181,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3469` - // Minimum execution time: 30_728_000 picoseconds. - Weight::from_parts(31_794_924, 3469) - // Standard Error: 171 - .saturating_add(Weight::from_parts(1_020, 0).saturating_mul(r.into())) + // Minimum execution time: 29_576_000 picoseconds. + Weight::from_parts(30_722_650, 3469) + // Standard Error: 192 + .saturating_add(Weight::from_parts(2_601, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -192,8 +196,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `221` // Estimated: `3686` - // Minimum execution time: 29_183_000 picoseconds. - Weight::from_parts(30_017_000, 3686) + // Minimum execution time: 28_522_000 picoseconds. 
+ Weight::from_parts(29_323_000, 3686) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -209,12 +213,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `526 + t * (64 ±0)` // Estimated: `3991 + t * (64 ±0)` - // Minimum execution time: 20_726_000 picoseconds. - Weight::from_parts(20_964_411, 3991) - // Standard Error: 119 - .saturating_add(Weight::from_parts(1_230, 0).saturating_mul(r.into())) - // Standard Error: 2_837 - .saturating_add(Weight::from_parts(81_831, 0).saturating_mul(t.into())) + // Minimum execution time: 19_650_000 picoseconds. + Weight::from_parts(19_837_982, 3991) + // Standard Error: 151 + .saturating_add(Weight::from_parts(1_746, 0).saturating_mul(r.into())) + // Standard Error: 3_588 + .saturating_add(Weight::from_parts(102_359, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) @@ -228,10 +232,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `747 + t * (112 ±0)` // Estimated: `4212 + t * (112 ±0)` - // Minimum execution time: 16_048_000 picoseconds. - Weight::from_parts(16_694_981, 4212) - // Standard Error: 4_480 - .saturating_add(Weight::from_parts(179_723, 0).saturating_mul(t.into())) + // Minimum execution time: 15_641_000 picoseconds. + Weight::from_parts(15_745_460, 4212) + // Standard Error: 5_106 + .saturating_add(Weight::from_parts(229_475, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) @@ -249,10 +253,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `786 + t * (112 ±0)` // Estimated: `4242 + t * (112 ±0)` - // Minimum execution time: 61_319_000 picoseconds. 
- Weight::from_parts(62_217_195, 4242) - // Standard Error: 6_721 - .saturating_add(Weight::from_parts(186_620, 0).saturating_mul(t.into())) + // Minimum execution time: 62_059_000 picoseconds. + Weight::from_parts(64_604_554, 4242) + // Standard Error: 11_818 + .saturating_add(Weight::from_parts(116_297, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) @@ -266,10 +270,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `269` // Estimated: `3734` - // Minimum execution time: 15_397_000 picoseconds. - Weight::from_parts(15_942_494, 3734) - // Standard Error: 1_657 - .saturating_add(Weight::from_parts(4_128, 0).saturating_mul(t.into())) + // Minimum execution time: 14_133_000 picoseconds. + Weight::from_parts(14_957_547, 3734) + // Standard Error: 2_765 + .saturating_add(Weight::from_parts(22_138, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index b1e22d3e9145a..3f953d4eefe6e 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", optional = true } +serde = { version = "1.0.163", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = 
"../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] serde_json = "1.0.85" @@ -41,5 +41,11 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml new file mode 100644 index 0000000000000..69118cec04bcf --- /dev/null +++ b/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -0,0 +1,58 @@ +[package] +name = "pallet-asset-conversion-tx-payment" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Pallet to manage transaction payments in assets by converting them to native assets." 
+readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +# Substrate dependencies +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } +pallet-asset-conversion = { version = "4.0.0-dev", default-features = false, path = "../../asset-conversion" } +pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = ".." } +codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } + +[dev-dependencies] +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../../primitives/io" } +sp-storage = { version = "13.0.0", default-features = false, path = "../../../primitives/storage" } +pallet-assets = { version = "4.0.0-dev", path = "../../assets" } +pallet-balances = { version = "4.0.0-dev", path = "../../balances" } + +[features] +default = ["std"] +std = [ + "scale-info/std", + "codec/std", + "sp-std/std", + "sp-runtime/std", + "frame-support/std", + "frame-system/std", + "sp-io/std", + "sp-core/std", + "pallet-asset-conversion/std", + "pallet-transaction-payment/std", + "pallet-assets/std", + "pallet-balances/std", + "sp-storage/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-asset-conversion/try-runtime", + "pallet-assets/try-runtime", + "pallet-balances/try-runtime", + "pallet-transaction-payment/try-runtime", + "sp-runtime/try-runtime" +] diff --git 
a/frame/transaction-payment/asset-conversion-tx-payment/README.md b/frame/transaction-payment/asset-conversion-tx-payment/README.md new file mode 100644 index 0000000000000..eccba773673e6 --- /dev/null +++ b/frame/transaction-payment/asset-conversion-tx-payment/README.md @@ -0,0 +1,21 @@ +# pallet-asset-conversion-tx-payment + +## Asset Conversion Transaction Payment Pallet + +This pallet allows runtimes that include it to pay for transactions in assets other than the +native token of the chain. + +### Overview +It does this by extending transactions to include an optional `AssetId` that specifies the asset +to be used for payment (defaulting to the native token on `None`). It expects an +[`OnChargeAssetTransaction`] implementation analogously to [`pallet-transaction-payment`]. The +included [`AssetConversionAdapter`] (implementing [`OnChargeAssetTransaction`]) determines the fee +amount by converting the fee calculated by [`pallet-transaction-payment`] into the desired +asset. + +### Integration +This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means +you should include both pallets in your `construct_runtime` macro, but only include this +pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). + +License: Apache-2.0 diff --git a/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs b/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs new file mode 100644 index 0000000000000..c2d9ed56c7aa3 --- /dev/null +++ b/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs @@ -0,0 +1,349 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Asset Conversion Transaction Payment Pallet +//! +//! This pallet allows runtimes that include it to pay for transactions in assets other than the +//! chain's native asset. +//! +//! ## Overview +//! +//! This pallet provides a `SignedExtension` with an optional `AssetId` that specifies the asset +//! to be used for payment (defaulting to the native token on `None`). It expects an +//! [`OnChargeAssetTransaction`] implementation analogous to [`pallet-transaction-payment`]. The +//! included [`AssetConversionAdapter`] (implementing [`OnChargeAssetTransaction`]) determines the +//! fee amount by converting the fee calculated by [`pallet-transaction-payment`] in the native +//! asset into the amount required of the specified asset. +//! +//! ## Pallet API +//! +//! This pallet does not have any dispatchable calls or storage. It wraps FRAME's Transaction +//! Payment pallet and functions as a replacement. This means you should include both pallets in +//! your `construct_runtime` macro, but only include this pallet's [`SignedExtension`] +//! ([`ChargeAssetTxPayment`]). +//! +//! ## Terminology +//! +//! - Native Asset or Native Currency: The asset that a chain considers native, as in its default +//! for transaction fee payment, deposits, inflation, etc. +//! - Other assets: Other assets that may exist on chain, for example under the Assets pallet. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +use sp_std::prelude::*; + +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, + traits::{ + tokens::fungibles::{Balanced, Inspect}, + IsType, + }, + DefaultNoBound, +}; +use pallet_transaction_payment::OnChargeTransaction; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero}, + transaction_validity::{ + InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + }, +}; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +mod payment; +use frame_support::traits::tokens::AssetId; +use pallet_asset_conversion::MultiAssetIdConverter; +pub use payment::*; + +/// Type aliases used for interaction with `OnChargeTransaction`. +pub(crate) type OnChargeTransactionOf = + ::OnChargeTransaction; +/// Balance type alias for balances of the chain's native asset. +pub(crate) type BalanceOf = as OnChargeTransaction>::Balance; +/// Liquidity info type alias. +pub(crate) type LiquidityInfoOf = + as OnChargeTransaction>::LiquidityInfo; + +/// Balance type alias for balances of assets that implement the `fungibles` trait. +pub(crate) type AssetBalanceOf = + <::Fungibles as Inspect<::AccountId>>::Balance; +/// Type alias for Asset IDs. +pub(crate) type AssetIdOf = + <::Fungibles as Inspect<::AccountId>>::AssetId; + +/// Type alias for the interaction of balances with `OnChargeAssetTransaction`. +pub(crate) type ChargeAssetBalanceOf = + <::OnChargeAssetTransaction as OnChargeAssetTransaction>::Balance; +/// Type alias for Asset IDs in their interaction with `OnChargeAssetTransaction`. +pub(crate) type ChargeAssetIdOf = + <::OnChargeAssetTransaction as OnChargeAssetTransaction>::AssetId; +/// Liquidity info type alias for interaction with `OnChargeAssetTransaction`. 
+pub(crate) type ChargeAssetLiquidityOf = + <::OnChargeAssetTransaction as OnChargeAssetTransaction>::LiquidityInfo; + +/// Used to pass the initial payment info from pre- to post-dispatch. +#[derive(Encode, Decode, DefaultNoBound, TypeInfo)] +pub enum InitialPayment { + /// No initial fee was paid. + #[default] + Nothing, + /// The initial fee was paid in the native currency. + Native(LiquidityInfoOf), + /// The initial fee was paid in an asset. + Asset((LiquidityInfoOf, BalanceOf, AssetBalanceOf)), +} + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::config] + pub trait Config: + frame_system::Config + pallet_transaction_payment::Config + pallet_asset_conversion::Config + { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// The fungibles instance used to pay for transactions in assets. + type Fungibles: Balanced; + /// The actual transaction charging logic that charges the fees. + type OnChargeAssetTransaction: OnChargeAssetTransaction; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A transaction fee `actual_fee`, of which `tip` was added to the minimum inclusion fee, + /// has been paid by `who` in an asset `asset_id`. + AssetTxFeePaid { + who: T::AccountId, + actual_fee: AssetBalanceOf, + tip: BalanceOf, + asset_id: ChargeAssetIdOf, + }, + /// A swap of the refund in native currency back to asset failed. + AssetRefundFailed { native_amount_kept: BalanceOf }, + } +} + +/// Require payment for transaction inclusion and optionally include a tip to gain additional +/// priority in the queue. Allows paying via both `Currency` as well as `fungibles::Balanced`. +/// +/// Wraps the transaction logic in [`pallet_transaction_payment`] and extends it with assets. +/// An asset ID of `None` falls back to the underlying transaction payment logic via the native +/// currency. 
+#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct ChargeAssetTxPayment { + #[codec(compact)] + tip: BalanceOf, + asset_id: Option>, +} + +impl ChargeAssetTxPayment +where + T::RuntimeCall: Dispatchable, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + Sync + Into> + From>, + ChargeAssetIdOf: Send + Sync, +{ + /// Utility constructor. Used only in client/factory code. + pub fn from(tip: BalanceOf, asset_id: Option>) -> Self { + Self { tip, asset_id } + } + + /// Fee withdrawal logic that dispatches to either `OnChargeAssetTransaction` or + /// `OnChargeTransaction`. + fn withdraw_fee( + &self, + who: &T::AccountId, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(BalanceOf, InitialPayment), TransactionValidityError> { + let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); + debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); + if fee.is_zero() { + Ok((fee, InitialPayment::Nothing)) + } else if let Some(asset_id) = &self.asset_id { + T::OnChargeAssetTransaction::withdraw_fee( + who, + call, + info, + asset_id.clone(), + fee.into(), + self.tip.into(), + ) + .map(|(used_for_fee, received_exchanged, asset_consumed)| { + ( + fee, + InitialPayment::Asset(( + used_for_fee.into(), + received_exchanged.into(), + asset_consumed.into(), + )), + ) + }) + } else { + as OnChargeTransaction>::withdraw_fee( + who, call, info, fee, self.tip, + ) + .map(|i| (fee, InitialPayment::Native(i))) + .map_err(|_| -> TransactionValidityError { InvalidTransaction::Payment.into() }) + } + } +} + +impl sp_std::fmt::Debug for ChargeAssetTxPayment { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "ChargeAssetTxPayment<{:?}, {:?}>", self.tip, self.asset_id.encode()) + } + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} 
+ +impl SignedExtension for ChargeAssetTxPayment +where + T::RuntimeCall: Dispatchable, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + + Sync + + From + + Into> + + Into> + + From>, + ChargeAssetIdOf: Send + Sync, +{ + const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; + type AccountId = T::AccountId; + type Call = T::RuntimeCall; + type AdditionalSigned = (); + type Pre = ( + // tip + BalanceOf, + // who paid the fee + Self::AccountId, + // imbalance resulting from withdrawing the fee + InitialPayment, + // asset_id for the transaction payment + Option>, + ); + + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } + + fn validate( + &self, + who: &Self::AccountId, + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + use pallet_transaction_payment::ChargeTransactionPayment; + let (fee, _) = self.withdraw_fee(who, call, info, len)?; + let priority = ChargeTransactionPayment::::get_priority(info, len, self.tip, fee); + Ok(ValidTransaction { priority, ..Default::default() }) + } + + fn pre_dispatch( + self, + who: &Self::AccountId, + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + let (_fee, initial_payment) = self.withdraw_fee(who, call, info, len)?; + Ok((self.tip, who.clone(), initial_payment, self.asset_id)) + } + + fn post_dispatch( + pre: Option, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + if let Some((tip, who, initial_payment, asset_id)) = pre { + match initial_payment { + InitialPayment::Native(already_withdrawn) => { + debug_assert!( + asset_id.is_none(), + "For that payment type the `asset_id` should be None" + ); + pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( + Some((tip, who, already_withdrawn)), + info, + post_info, + len, + result, + )?; + }, + InitialPayment::Asset(already_withdrawn) => { + 
debug_assert!( + asset_id.is_some(), + "For that payment type the `asset_id` should be set" + ); + let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( + len as u32, info, post_info, tip, + ); + + if let Some(asset_id) = asset_id { + let (used_for_fee, received_exchanged, asset_consumed) = already_withdrawn; + let converted_fee = T::OnChargeAssetTransaction::correct_and_deposit_fee( + &who, + info, + post_info, + actual_fee.into(), + tip.into(), + used_for_fee.into(), + received_exchanged.into(), + asset_id.clone(), + asset_consumed.into(), + )?; + + Pallet::::deposit_event(Event::::AssetTxFeePaid { + who, + actual_fee: converted_fee, + tip, + asset_id, + }); + } + }, + InitialPayment::Nothing => { + // `actual_fee` should be zero here for any signed extrinsic. It would be + // non-zero here in case of unsigned extrinsics as they don't pay fees but + // `compute_actual_fee` is not aware of them. In both cases it's fine to just + // move ahead without adjusting the fee, though, so we do nothing. + debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); + }, + } + } + + Ok(()) + } +} diff --git a/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs new file mode 100644 index 0000000000000..bfbe8b4178cee --- /dev/null +++ b/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -0,0 +1,268 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use crate as pallet_asset_conversion_tx_payment; + +use codec; +use frame_support::{ + dispatch::DispatchClass, + instances::Instance2, + ord_parameter_types, + pallet_prelude::*, + parameter_types, + traits::{AsEnsureOriginWithArg, ConstU32, ConstU64, ConstU8, Imbalance, OnUnbalanced}, + weights::{Weight, WeightToFee as WeightToFeeT}, + PalletId, +}; +use frame_system as system; +use frame_system::{EnsureRoot, EnsureSignedBy}; +use pallet_asset_conversion::{NativeOrAssetId, NativeOrAssetIdConverter}; +use pallet_transaction_payment::CurrencyAdapter; +use sp_core::H256; +use sp_runtime::{ + traits::{AccountIdConversion, BlakeTwo256, IdentityLookup, SaturatedConversion}, + Permill, +}; + +type Block = frame_system::mocking::MockBlock; +type Balance = u64; +type AccountId = u64; + +frame_support::construct_runtime!( + pub enum Runtime + { + System: system, + Balances: pallet_balances, + TransactionPayment: pallet_transaction_payment, + Assets: pallet_assets, + PoolAssets: pallet_assets::, + AssetConversion: pallet_asset_conversion, + AssetTxPayment: pallet_asset_conversion_tx_payment, + } +); + +parameter_types! { + pub(crate) static ExtrinsicBaseWeight: Weight = Weight::zero(); +} + +pub struct BlockWeights; +impl Get for BlockWeights { + fn get() -> frame_system::limits::BlockWeights { + frame_system::limits::BlockWeights::builder() + .base_block(Weight::zero()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get().into(); + }) + .for_class(DispatchClass::non_mandatory(), |weights| { + weights.max_total = Weight::from_parts(1024, u64::MAX).into(); + }) + .build_or_panic() + } +} + +parameter_types! 
{ + pub static WeightToFee: u64 = 1; + pub static TransactionByteFee: u64 = 1; +} + +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Nonce = u64; + type RuntimeCall = RuntimeCall; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +parameter_types! { + pub const ExistentialDeposit: u64 = 10; +} + +impl pallet_balances::Config for Runtime { + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ConstU64<10>; + type AccountStore = System; + type MaxLocks = (); + type WeightInfo = (); + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxFreezes = (); + type RuntimeHoldReason = (); + type MaxHolds = (); +} + +impl WeightToFeeT for WeightToFee { + type Balance = u64; + + fn weight_to_fee(weight: &Weight) -> Self::Balance { + Self::Balance::saturated_from(weight.ref_time()) + .saturating_mul(WEIGHT_TO_FEE.with(|v| *v.borrow())) + } +} + +impl WeightToFeeT for TransactionByteFee { + type Balance = u64; + + fn weight_to_fee(weight: &Weight) -> Self::Balance { + Self::Balance::saturated_from(weight.ref_time()) + .saturating_mul(TRANSACTION_BYTE_FEE.with(|v| *v.borrow())) + } +} + +parameter_types! 
{ + pub(crate) static TipUnbalancedAmount: u64 = 0; + pub(crate) static FeeUnbalancedAmount: u64 = 0; +} + +pub struct DealWithFees; +impl OnUnbalanced> for DealWithFees { + fn on_unbalanceds( + mut fees_then_tips: impl Iterator>, + ) { + if let Some(fees) = fees_then_tips.next() { + FeeUnbalancedAmount::mutate(|a| *a += fees.peek()); + if let Some(tips) = fees_then_tips.next() { + TipUnbalancedAmount::mutate(|a| *a += tips.peek()); + } + } + } +} + +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = CurrencyAdapter; + type WeightToFee = WeightToFee; + type LengthToFee = TransactionByteFee; + type FeeMultiplierUpdate = (); + type OperationalFeeMultiplier = ConstU8<5>; +} + +type AssetId = u32; + +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = AssetId; + type AssetIdParameter = codec::Compact; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = EnsureRoot; + type AssetDeposit = ConstU64<2>; + type AssetAccountDeposit = ConstU64<2>; + type MetadataDepositBase = ConstU64<0>; + type MetadataDepositPerByte = ConstU64<0>; + type ApprovalDeposit = ConstU64<0>; + type StringLimit = ConstU32<20>; + type Freezer = (); + type Extra = (); + type CallbackHandle = (); + type WeightInfo = (); + type RemoveItemsLimit = ConstU32<1000>; + pallet_assets::runtime_benchmarks_enabled! 
{ + type BenchmarkHelper = (); + } +} + +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = u64; + type RemoveItemsLimit = ConstU32<1000>; + type AssetId = u32; + type AssetIdParameter = u32; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type AssetDeposit = ConstU64<0>; + type AssetAccountDeposit = ConstU64<0>; + type MetadataDepositBase = ConstU64<0>; + type MetadataDepositPerByte = ConstU64<0>; + type ApprovalDeposit = ConstU64<0>; + type StringLimit = ConstU32<50>; + type Freezer = (); + type Extra = (); + type WeightInfo = (); + type CallbackHandle = (); + pallet_assets::runtime_benchmarks_enabled! { + type BenchmarkHelper = (); + } +} + +parameter_types! { + pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); + pub storage AllowMultiAssetPools: bool = false; + // should be non-zero if AllowMultiAssetPools is true, otherwise can be zero + pub storage LiquidityWithdrawalFee: Permill = Permill::from_percent(0); + pub const MaxSwapPathLength: u32 = 4; +} + +ord_parameter_types! 
{ + pub const AssetConversionOrigin: u64 = AccountIdConversion::::into_account_truncating(&AssetConversionPalletId::get()); +} + +impl pallet_asset_conversion::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type AssetBalance = ::Balance; + type AssetId = u32; + type PoolAssetId = u32; + type Assets = Assets; + type PoolAssets = PoolAssets; + type PalletId = AssetConversionPalletId; + type WeightInfo = (); + type LPFee = ConstU32<3>; // means 0.3% + type PoolSetupFee = ConstU64<100>; // should be more or equal to the existential deposit + type PoolSetupFeeReceiver = AssetConversionOrigin; + type LiquidityWithdrawalFee = LiquidityWithdrawalFee; + type AllowMultiAssetPools = AllowMultiAssetPools; + type MaxSwapPathLength = MaxSwapPathLength; + type MintMinLiquidity = ConstU64<100>; // 100 is good enough when the main currency has 12 decimals. + + type Balance = u64; + type HigherPrecisionBalance = u128; + + type MultiAssetId = NativeOrAssetId; + type MultiAssetIdConverter = NativeOrAssetIdConverter; + + pallet_asset_conversion::runtime_benchmarks_enabled! { + type BenchmarkHelper = (); + } +} + +impl Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Fungibles = Assets; + type OnChargeAssetTransaction = AssetConversionAdapter; +} diff --git a/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs b/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs new file mode 100644 index 0000000000000..0d090211d0352 --- /dev/null +++ b/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs @@ -0,0 +1,202 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +///! Traits and default implementation for paying transaction fees in assets. +use super::*; +use crate::Config; + +use frame_support::{ + ensure, + traits::{fungible::Inspect, tokens::Balance}, + unsigned::TransactionValidityError, +}; +use pallet_asset_conversion::Swap; +use sp_runtime::{ + traits::{DispatchInfoOf, PostDispatchInfoOf, Zero}, + transaction_validity::InvalidTransaction, + Saturating, +}; +use sp_std::marker::PhantomData; + +/// Handle withdrawing, refunding and depositing of transaction fees. +pub trait OnChargeAssetTransaction { + /// The underlying integer type in which fees are calculated. + type Balance: Balance; + /// The type used to identify the assets used for transaction payment. + type AssetId: AssetId; + /// The type used to store the intermediate values between pre- and post-dispatch. + type LiquidityInfo; + + /// Secure the payment of the transaction fees before the transaction is executed. + /// + /// Note: The `fee` already includes the `tip`. + fn withdraw_fee( + who: &T::AccountId, + call: &T::RuntimeCall, + dispatch_info: &DispatchInfoOf, + asset_id: Self::AssetId, + fee: Self::Balance, + tip: Self::Balance, + ) -> Result< + (LiquidityInfoOf, Self::LiquidityInfo, AssetBalanceOf), + TransactionValidityError, + >; + + /// Refund any overpaid fees and deposit the corrected amount. + /// The actual fee gets calculated once the transaction is executed. + /// + /// Note: The `fee` already includes the `tip`. + /// + /// Returns the fee and tip in the asset used for payment as (fee, tip). 
+ fn correct_and_deposit_fee( + who: &T::AccountId, + dispatch_info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + corrected_fee: Self::Balance, + tip: Self::Balance, + fee_paid: LiquidityInfoOf, + received_exchanged: Self::LiquidityInfo, + asset_id: Self::AssetId, + initial_asset_consumed: AssetBalanceOf, + ) -> Result, TransactionValidityError>; +} + +/// Implements the asset transaction for a balance to asset converter (implementing [`Swap`]). +/// +/// The converter is given the complete fee in terms of the asset used for the transaction. +pub struct AssetConversionAdapter(PhantomData<(C, CON)>); + +/// Default implementation for a runtime instantiating this pallet, an asset to native swapper. +impl OnChargeAssetTransaction for AssetConversionAdapter +where + T: Config, + C: Inspect<::AccountId>, + CON: Swap, + T::HigherPrecisionBalance: From> + TryInto>, + T::MultiAssetId: From>, + BalanceOf: IsType<::AccountId>>::Balance>, +{ + type Balance = BalanceOf; + type AssetId = AssetIdOf; + type LiquidityInfo = BalanceOf; + + /// Swap & withdraw the predicted fee from the transaction origin. + /// + /// Note: The `fee` already includes the `tip`. + /// + /// Returns the total amount in native currency received by exchanging the `asset_id` and the + /// amount in native currency used to pay the fee. 
+ fn withdraw_fee( + who: &T::AccountId, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + asset_id: Self::AssetId, + fee: BalanceOf, + tip: BalanceOf, + ) -> Result< + (LiquidityInfoOf, Self::LiquidityInfo, AssetBalanceOf), + TransactionValidityError, + > { + // convert the asset into native currency + let ed = C::minimum_balance(); + let native_asset_required = + if C::balance(&who) >= ed.saturating_add(fee.into()) { fee } else { fee + ed.into() }; + + let asset_consumed = CON::swap_tokens_for_exact_tokens( + who.clone(), + vec![asset_id.into(), T::MultiAssetIdConverter::get_native()], + T::HigherPrecisionBalance::from(native_asset_required), + None, + who.clone(), + true, + ) + .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment))?; + + let asset_consumed = asset_consumed + .try_into() + .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment))?; + + ensure!(asset_consumed > Zero::zero(), InvalidTransaction::Payment); + + // charge the fee in native currency + ::withdraw_fee(who, call, info, fee, tip) + .map(|r| (r, native_asset_required, asset_consumed)) + } + + /// Correct the fee and swap the refund back to asset. + /// + /// Note: The `corrected_fee` already includes the `tip`. + /// Note: Is the ED wasn't needed, the `received_exchanged` will be equal to `fee_paid`, or + /// `fee_paid + ed` otherwise. + fn correct_and_deposit_fee( + who: &T::AccountId, + dispatch_info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + corrected_fee: BalanceOf, + tip: BalanceOf, + fee_paid: LiquidityInfoOf, + received_exchanged: Self::LiquidityInfo, + asset_id: Self::AssetId, + initial_asset_consumed: AssetBalanceOf, + ) -> Result, TransactionValidityError> { + // Refund the native asset to the account that paid the fees (`who`). + // The `who` account will receive the "fee_paid - corrected_fee" refund. 
+ ::correct_and_deposit_fee( + who, + dispatch_info, + post_info, + corrected_fee, + tip, + fee_paid, + )?; + + // calculate the refund in native asset, to swap back to the desired `asset_id` + let swap_back = received_exchanged.saturating_sub(corrected_fee); + let mut asset_refund = Zero::zero(); + if !swap_back.is_zero() { + // If this fails, the account might have dropped below the existential balance or there + // is not enough liquidity left in the pool. In that case we don't throw an error and + // the account will keep the native currency. + match CON::swap_exact_tokens_for_tokens( + who.clone(), // we already deposited the native to `who` + vec![ + T::MultiAssetIdConverter::get_native(), // we provide the native + asset_id.into(), // we want asset_id back + ], + T::HigherPrecisionBalance::from(swap_back), /* amount of the native asset to + * convert to `asset_id` */ + None, // no minimum amount back + who.clone(), // we will refund to `who` + false, // no need to keep alive + ) + .ok() + { + Some(acquired) => { + asset_refund = acquired + .try_into() + .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment))?; + }, + None => { + Pallet::::deposit_event(Event::::AssetRefundFailed { + native_amount_kept: swap_back, + }); + }, + } + } + + let actual_paid = initial_asset_consumed.saturating_sub(asset_refund); + Ok(actual_paid) + } +} diff --git a/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs b/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs new file mode 100644 index 0000000000000..9e9b74a0ddb2e --- /dev/null +++ b/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs @@ -0,0 +1,708 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; + +use frame_support::{ + assert_ok, + dispatch::{DispatchInfo, PostDispatchInfo}, + pallet_prelude::*, + traits::{fungible::Inspect, fungibles::Mutate}, + weights::Weight, +}; +use frame_system as system; +use mock::{ExtrinsicBaseWeight, *}; +use pallet_asset_conversion::NativeOrAssetId; +use pallet_balances::Call as BalancesCall; +use sp_runtime::{traits::StaticLookup, BuildStorage}; + +const CALL: &::RuntimeCall = + &RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); + +pub struct ExtBuilder { + balance_factor: u64, + base_weight: Weight, + byte_fee: u64, + weight_to_fee: u64, +} + +impl Default for ExtBuilder { + fn default() -> Self { + Self { + balance_factor: 1, + base_weight: Weight::from_parts(0, 0), + byte_fee: 1, + weight_to_fee: 1, + } + } +} + +impl ExtBuilder { + pub fn base_weight(mut self, base_weight: Weight) -> Self { + self.base_weight = base_weight; + self + } + pub fn balance_factor(mut self, factor: u64) -> Self { + self.balance_factor = factor; + self + } + fn set_constants(&self) { + ExtrinsicBaseWeight::mutate(|v| *v = self.base_weight); + TRANSACTION_BYTE_FEE.with(|v| *v.borrow_mut() = self.byte_fee); + WEIGHT_TO_FEE.with(|v| *v.borrow_mut() = self.weight_to_fee); + } + pub fn build(self) -> sp_io::TestExternalities { + self.set_constants(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + pallet_balances::GenesisConfig:: { + balances: if self.balance_factor > 0 { + vec![ + (1, 10 * self.balance_factor), + (2, 20 * self.balance_factor), + (3, 
30 * self.balance_factor), + (4, 40 * self.balance_factor), + (5, 50 * self.balance_factor), + (6, 60 * self.balance_factor), + ] + } else { + vec![] + }, + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() + } +} + +/// create a transaction info struct from weight. Handy to avoid building the whole struct. +pub fn info_from_weight(w: Weight) -> DispatchInfo { + // pays_fee: Pays::Yes -- class: DispatchClass::Normal + DispatchInfo { weight: w, ..Default::default() } +} + +fn post_info_from_weight(w: Weight) -> PostDispatchInfo { + PostDispatchInfo { actual_weight: Some(w), pays_fee: Default::default() } +} + +fn info_from_pays(p: Pays) -> DispatchInfo { + DispatchInfo { pays_fee: p, ..Default::default() } +} + +fn post_info_from_pays(p: Pays) -> PostDispatchInfo { + PostDispatchInfo { actual_weight: None, pays_fee: p } +} + +fn default_post_info() -> PostDispatchInfo { + PostDispatchInfo { actual_weight: None, pays_fee: Default::default() } +} + +fn setup_lp(asset_id: u32, balance_factor: u64) { + let lp_provider = 5; + assert_ok!(Balances::force_set_balance( + RuntimeOrigin::root(), + lp_provider, + 10_000 * balance_factor + )); + let lp_provider_account = ::Lookup::unlookup(lp_provider); + assert_ok!(Assets::mint_into(asset_id.into(), &lp_provider_account, 10_000 * balance_factor)); + + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(asset_id); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(lp_provider), token_1, token_2)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider), + token_1, + token_2, + 1_000 * balance_factor, // 1 desired + 10_000 * balance_factor, // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider_account, + )); +} + +const WEIGHT_5: Weight = Weight::from_parts(5, 0); +const WEIGHT_50: Weight = Weight::from_parts(50, 0); +const WEIGHT_100: Weight = Weight::from_parts(100, 0); + +#[test] +fn transaction_payment_in_native_possible() { + let base_weight = 5; + let 
balance_factor = 100; + ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() + .execute_with(|| { + let len = 10; + let pre = ChargeAssetTxPayment::::from(0, None) + .pre_dispatch(&1, CALL, &info_from_weight(WEIGHT_5), len) + .unwrap(); + let initial_balance = 10 * balance_factor; + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); + + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + Some(pre), + &info_from_weight(WEIGHT_5), + &default_post_info(), + len, + &Ok(()) + )); + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); + + let pre = ChargeAssetTxPayment::::from(5 /* tipped */, None) + .pre_dispatch(&2, CALL, &info_from_weight(WEIGHT_100), len) + .unwrap(); + let initial_balance_for_2 = 20 * balance_factor; + + assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5); + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + Some(pre), + &info_from_weight(WEIGHT_100), + &post_info_from_weight(WEIGHT_50), + len, + &Ok(()) + )); + assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 5); + }); +} + +#[test] +fn transaction_payment_in_asset_possible() { + let base_weight = 5; + let balance_factor = 100; + ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 2; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 1; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + + let len = 10; + let tx_weight = 5; + + setup_lp(asset_id, balance_factor); + + let fee_in_native = base_weight + 
tx_weight + len as u64; + let input_quote = AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Asset(asset_id), + NativeOrAssetId::Native, + fee_in_native, + true, + ); + assert_eq!(input_quote, Some(201)); + + let fee_in_asset = input_quote.unwrap(); + assert_eq!(Assets::balance(asset_id, caller), balance); + + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + .unwrap(); + // assert that native balance is not used + assert_eq!(Balances::free_balance(caller), 10 * balance_factor); + + // check that fee was charged in the given asset + assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); + + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + Some(pre), + &info_from_weight(WEIGHT_5), // estimated tx weight + &default_post_info(), // weight actually used == estimated + len, + &Ok(()) + )); + + assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); + assert_eq!(TipUnbalancedAmount::get(), 0); + assert_eq!(FeeUnbalancedAmount::get(), fee_in_native); + }); +} + +#[test] +fn transaction_payment_in_asset_fails_if_no_pool_for_that_asset() { + let base_weight = 5; + let balance_factor = 100; + ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 2; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 1; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + + let len = 10; + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)).pre_dispatch( + &caller, + CALL, + &info_from_weight(WEIGHT_5), + len, + ); + + // 
As there is no pool in the dex set up for this asset, conversion should fail. + assert!(pre.is_err()); + }); +} + +#[test] +fn transaction_payment_without_fee() { + let base_weight = 5; + let balance_factor = 100; + ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() + .execute_with(|| { + let caller = 1; + + // create the asset + let asset_id = 1; + let balance = 1000; + let min_balance = 2; + + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + min_balance, + )); + + setup_lp(asset_id, balance_factor); + + // mint into the caller account + let beneficiary = ::Lookup::unlookup(caller); + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + + let weight = 5; + let len = 10; + let fee_in_native = base_weight + weight + len as u64; + let input_quote = AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Asset(asset_id), + NativeOrAssetId::Native, + fee_in_native, + true, + ); + assert_eq!(input_quote, Some(201)); + + let fee_in_asset = input_quote.unwrap(); + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + .unwrap(); + + // assert that native balance is not used + assert_eq!(Balances::free_balance(caller), 10 * balance_factor); + // check that fee was charged in the given asset + assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); + + let refund = AssetConversion::quote_price_exact_tokens_for_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(asset_id), + fee_in_native, + true, + ) + .unwrap(); + assert_eq!(refund, 199); + + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + Some(pre), + &info_from_weight(WEIGHT_5), + &post_info_from_pays(Pays::No), + len, + &Ok(()) + )); + + // caller should get refunded + 
assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset + refund); + assert_eq!(Balances::free_balance(caller), 10 * balance_factor); + }); +} + +#[test] +fn asset_transaction_payment_with_tip_and_refund() { + let base_weight = 5; + let balance_factor = 100; + ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 2; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + min_balance, + )); + + setup_lp(asset_id, balance_factor); + + // mint into the caller account + let caller = 2; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 10000; + + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + + let weight = 100; + let tip = 5; + let len = 10; + let fee_in_native = base_weight + weight + len as u64 + tip; + let input_quote = AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Asset(asset_id), + NativeOrAssetId::Native, + fee_in_native, + true, + ); + assert_eq!(input_quote, Some(1206)); + + let fee_in_asset = input_quote.unwrap(); + let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_100), len) + .unwrap(); + assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); + + let final_weight = 50; + let expected_fee = fee_in_native - final_weight - tip; + let expected_token_refund = AssetConversion::quote_price_exact_tokens_for_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(asset_id), + fee_in_native - expected_fee - tip, + true, + ) + .unwrap(); + + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + Some(pre), + &info_from_weight(WEIGHT_100), + &post_info_from_weight(WEIGHT_50), + len, + &Ok(()) + )); + + 
assert_eq!(TipUnbalancedAmount::get(), tip); + assert_eq!(FeeUnbalancedAmount::get(), expected_fee); + + // caller should get refunded + assert_eq!( + Assets::balance(asset_id, caller), + balance - fee_in_asset + expected_token_refund + ); + assert_eq!(Balances::free_balance(caller), 20 * balance_factor); + }); +} + +#[test] +fn payment_from_account_with_only_assets() { + let base_weight = 5; + let balance_factor = 100; + ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 2; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + min_balance, + )); + + setup_lp(asset_id, balance_factor); + + // mint into the caller account + let caller = 333; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + + // assert that native balance is not necessary + assert_eq!(Balances::free_balance(caller), 0); + let weight = 5; + let len = 10; + + let fee_in_native = base_weight + weight + len as u64; + let ed = Balances::minimum_balance(); + let fee_in_asset = AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Asset(asset_id), + NativeOrAssetId::Native, + fee_in_native + ed, + true, + ) + .unwrap(); + assert_eq!(fee_in_asset, 301); + + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + .unwrap(); + assert_eq!(Balances::free_balance(caller), ed); + // check that fee was charged in the given asset + assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); + + let refund = AssetConversion::quote_price_exact_tokens_for_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(asset_id), + ed, + true, + ) + 
.unwrap(); + + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + Some(pre), + &info_from_weight(WEIGHT_5), + &default_post_info(), + len, + &Ok(()) + )); + assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset + refund); + assert_eq!(Balances::free_balance(caller), 0); + + assert_eq!(TipUnbalancedAmount::get(), 0); + assert_eq!(FeeUnbalancedAmount::get(), fee_in_native); + }); +} + +#[test] +fn converted_fee_is_never_zero_if_input_fee_is_not() { + let base_weight = 1; + let balance_factor = 100; + ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 1; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + setup_lp(asset_id, balance_factor); + + // mint into the caller account + let caller = 2; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + + let weight = 1; + let len = 1; + + // there will be no conversion when the fee is zero + { + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + .unwrap(); + // `Pays::No` implies there are no fees + assert_eq!(Assets::balance(asset_id, caller), balance); + + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + Some(pre), + &info_from_pays(Pays::No), + &post_info_from_pays(Pays::No), + len, + &Ok(()) + )); + assert_eq!(Assets::balance(asset_id, caller), balance); + } + + // validate even a small fee gets converted to asset. 
+ let fee_in_native = base_weight + weight + len as u64; + let fee_in_asset = AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Asset(asset_id), + NativeOrAssetId::Native, + fee_in_native, + true, + ) + .unwrap(); + + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + .unwrap(); + assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); + + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + Some(pre), + &info_from_weight(Weight::from_parts(weight, 0)), + &default_post_info(), + len, + &Ok(()) + )); + assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); + }); +} + +#[test] +fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { + let base_weight = 1; + ExtBuilder::default() + .balance_factor(100) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 100; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 333; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + + let weight = 1; + let len = 1; + let fee = base_weight + weight + len as u64; + + // calculated fee is greater than 0 + assert!(fee > 0); + + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + .unwrap(); + // `Pays::No` implies no pre-dispatch fees + + assert_eq!(Assets::balance(asset_id, caller), balance); + + let (_tip, _who, initial_payment, _asset_id) = ⪯ + let not_paying = match initial_payment { + &InitialPayment::Nothing => true, + _ => false, + }; + assert!(not_paying, "initial payment should 
be Nothing if we pass Pays::No"); + + // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the + // initial fee) + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + Some(pre), + &info_from_pays(Pays::No), + &post_info_from_pays(Pays::Yes), + len, + &Ok(()) + )); + assert_eq!(Assets::balance(asset_id, caller), balance); + }); +} + +#[test] +fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { + let base_weight = 1; + ExtBuilder::default() + .balance_factor(100) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 100; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 333; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + + let weight = 1; + let len = 1; + ChargeAssetTxPayment::::pre_dispatch_unsigned( + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) + .unwrap(); + + assert_eq!(Assets::balance(asset_id, caller), balance); + + // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the + // initial fee) + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + None, + &info_from_weight(Weight::from_parts(weight, 0)), + &post_info_from_pays(Pays::Yes), + len, + &Ok(()) + )); + assert_eq!(Assets::balance(asset_id, caller), balance); + }); +} diff --git a/frame/transaction-payment/asset-tx-payment/Cargo.toml b/frame/transaction-payment/asset-tx-payment/Cargo.toml index 574eb9e23742b..056880423f2a8 100644 --- a/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -14,10 +14,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # 
Substrate dependencies -sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } @@ -25,14 +25,14 @@ pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../benchmarking", optional = true } # Other dependencies -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", optional = true } +serde = { version = "1.0.163", optional = true } [dev-dependencies] serde_json = "1.0.85" -sp-storage = { version = "7.0.0", default-features = false, path = "../../../primitives/storage" } +sp-storage = { version = "13.0.0", default-features = false, path = "../../../primitives/storage" } pallet-assets = { version = "4.0.0-dev", path = "../../assets" } pallet-authorship = { version = "4.0.0-dev", path = "../../authorship" } @@ -52,10 
+52,25 @@ std = [ "sp-core/std", "pallet-transaction-payment/std", "frame-benchmarking?/std", + "pallet-assets/std", + "pallet-authorship/std", + "pallet-balances/std", + "sp-storage/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "frame-system/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-assets/try-runtime", + "pallet-authorship/try-runtime", + "pallet-balances/try-runtime", + "pallet-transaction-payment/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/transaction-payment/asset-tx-payment/src/lib.rs b/frame/transaction-payment/asset-tx-payment/src/lib.rs index 4e83d8b489b70..753fae747a37e 100644 --- a/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ b/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -56,7 +56,6 @@ use sp_runtime::{ transaction_validity::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, - FixedPointOperand, }; #[cfg(test)] @@ -67,17 +66,17 @@ mod tests; mod payment; pub use payment::*; -// Type aliases used for interaction with `OnChargeTransaction`. +/// Type aliases used for interaction with `OnChargeTransaction`. pub(crate) type OnChargeTransactionOf = ::OnChargeTransaction; -// Balance type alias. +/// Balance type alias. pub(crate) type BalanceOf = as OnChargeTransaction>::Balance; -// Liquity info type alias. +/// Liquidity info type alias. pub(crate) type LiquidityInfoOf = as OnChargeTransaction>::LiquidityInfo; -// Type alias used for interaction with fungibles (assets). -// Balance type alias. +/// Type alias used for interaction with fungibles (assets). +/// Balance type alias. pub(crate) type AssetBalanceOf = <::Fungibles as Inspect<::AccountId>>::Balance; /// Asset id type alias. 
@@ -85,25 +84,25 @@ pub(crate) type AssetIdOf = <::Fungibles as Inspect<::AccountId>>::AssetId; // Type aliases used for interaction with `OnChargeAssetTransaction`. -// Balance type alias. +/// Balance type alias. pub(crate) type ChargeAssetBalanceOf = <::OnChargeAssetTransaction as OnChargeAssetTransaction>::Balance; -// Asset id type alias. +/// Asset id type alias. pub(crate) type ChargeAssetIdOf = <::OnChargeAssetTransaction as OnChargeAssetTransaction>::AssetId; -// Liquity info type alias. +/// Liquidity info type alias. pub(crate) type ChargeAssetLiquidityOf = <::OnChargeAssetTransaction as OnChargeAssetTransaction>::LiquidityInfo; /// Used to pass the initial payment info from pre- to post-dispatch. #[derive(Encode, Decode, DefaultNoBound, TypeInfo)] pub enum InitialPayment { - /// No initial fee was payed. + /// No initial fee was paid. #[default] Nothing, - /// The initial fee was payed in the native currency. + /// The initial fee was paid in the native currency. Native(LiquidityInfoOf), - /// The initial fee was payed in an asset. + /// The initial fee was paid in an asset. 
Asset(Credit), } @@ -156,8 +155,8 @@ pub struct ChargeAssetTxPayment { impl ChargeAssetTxPayment where T::RuntimeCall: Dispatchable, - AssetBalanceOf: Send + Sync + FixedPointOperand, - BalanceOf: Send + Sync + FixedPointOperand + IsType>, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + Sync + IsType>, ChargeAssetIdOf: Send + Sync, Credit: IsType>, { @@ -213,8 +212,8 @@ impl sp_std::fmt::Debug for ChargeAssetTxPayment { impl SignedExtension for ChargeAssetTxPayment where T::RuntimeCall: Dispatchable, - AssetBalanceOf: Send + Sync + FixedPointOperand, - BalanceOf: Send + Sync + From + FixedPointOperand + IsType>, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + Sync + From + IsType>, ChargeAssetIdOf: Send + Sync, Credit: IsType>, { diff --git a/frame/transaction-payment/asset-tx-payment/src/mock.rs b/frame/transaction-payment/asset-tx-payment/src/mock.rs index be7baaf2b370e..b8d7b523ca258 100644 --- a/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -29,23 +29,14 @@ use frame_system as system; use frame_system::EnsureRoot; use pallet_transaction_payment::CurrencyAdapter; use sp_core::H256; -use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, ConvertInto, IdentityLookup, SaturatedConversion}, -}; +use sp_runtime::traits::{BlakeTwo256, ConvertInto, IdentityLookup, SaturatedConversion}; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; type Balance = u64; type AccountId = u64; frame_support::construct_runtime!( - pub struct Runtime - where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { + pub struct Runtime { System: system, Balances: pallet_balances, TransactionPayment: pallet_transaction_payment, @@ -85,14 +76,13 @@ impl frame_system::Config for Runtime { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; 
+ type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -122,7 +112,7 @@ impl pallet_balances::Config for Runtime { type ReserveIdentifier = [u8; 8]; type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } diff --git a/frame/transaction-payment/asset-tx-payment/src/payment.rs b/frame/transaction-payment/asset-tx-payment/src/payment.rs index 49e78fb8bce01..717114ab6bd03 100644 --- a/frame/transaction-payment/asset-tx-payment/src/payment.rs +++ b/frame/transaction-payment/asset-tx-payment/src/payment.rs @@ -126,7 +126,7 @@ where .max(min_converted_fee); let can_withdraw = >::can_withdraw(asset_id, who, converted_fee); - if !matches!(can_withdraw, WithdrawConsequence::Success) { + if can_withdraw != WithdrawConsequence::Success { return Err(InvalidTransaction::Payment.into()) } >::withdraw( diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index 2fee9c849f4b4..8df98ceda9971 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -25,7 +25,7 @@ use frame_support::{ use frame_system as system; use mock::{ExtrinsicBaseWeight, *}; use pallet_balances::Call as BalancesCall; -use sp_runtime::traits::StaticLookup; +use sp_runtime::{traits::StaticLookup, BuildStorage}; const CALL: &::RuntimeCall = &RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); @@ -64,7 +64,7 @@ impl ExtBuilder { } pub fn build(self) -> sp_io::TestExternalities { self.set_constants(); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = 
frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: if self.balance_factor > 0 { vec![ diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index b9bf226e2da9e..28eb562e97d0c 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -13,12 +13,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", path = "./runtime-api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-rpc = { version = "6.0.0", path = "../../../primitives/rpc" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } -sp-weights = { version = "4.0.0", path = "../../../primitives/weights" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +sp-weights = { version = "20.0.0", path = "../../../primitives/weights" } diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index 854e4310b46dd..5cb7ebd7a2b8b 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -13,11 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = 
false, features = ["derive"] } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../transaction-payment" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/api" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../../primitives/runtime" } -sp-weights = { version = "4.0.0", default-features = false, path = "../../../../primitives/weights" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../../primitives/runtime" } +sp-weights = { version = "20.0.0", default-features = false, path = "../../../../primitives/weights" } [features] default = ["std"] diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index f7bdc23a0b3f0..8160d72ad8942 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -50,6 +50,15 @@ use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; +use frame_support::{ + dispatch::{ + DispatchClass, DispatchInfo, DispatchResult, GetDispatchInfo, Pays, PostDispatchInfo, + }, + traits::{Defensive, EstimateCallFee, Get}, + weights::{Weight, WeightToFee}, +}; +pub use pallet::*; +pub use payment::*; use sp_runtime::{ traits::{ Convert, DispatchInfoOf, Dispatchable, One, PostDispatchInfoOf, SaturatedConversion, @@ -58,17 +67,10 @@ use sp_runtime::{ transaction_validity::{ TransactionPriority, TransactionValidity, TransactionValidityError, ValidTransaction, }, - FixedPointNumber, FixedPointOperand, FixedU128, Perquintill, RuntimeDebug, + FixedPointNumber, FixedU128, Perbill, Perquintill, RuntimeDebug, }; use sp_std::prelude::*; - -use frame_support::{ - dispatch::{ - DispatchClass, DispatchInfo, DispatchResult, GetDispatchInfo, Pays, PostDispatchInfo, - }, - traits::{EstimateCallFee, Get}, - weights::{Weight, WeightToFee}, -}; +pub use types::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; #[cfg(test)] mod mock; @@ -78,10 +80,6 @@ mod 
tests; mod payment; mod types; -pub use pallet::*; -pub use payment::*; -pub use types::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; - /// Fee multiplier. pub type Multiplier = FixedU128; @@ -108,10 +106,17 @@ type BalanceOf = <::OnChargeTransaction as OnChargeTransaction= v * k * (s - s')` @@ -207,15 +212,30 @@ where let normal_block_weight = current_block_weight.get(DispatchClass::Normal).min(normal_max_weight); - // TODO: Handle all weight dimensions - let normal_max_weight = normal_max_weight.ref_time(); - let normal_block_weight = normal_block_weight.ref_time(); - - let s = S::get(); - let v = V::get(); - - let target_weight = (s * normal_max_weight) as u128; - let block_weight = normal_block_weight as u128; + // Normalize dimensions so they can be compared. Ensure (defensive) max weight is non-zero. + let normalized_ref_time = Perbill::from_rational( + normal_block_weight.ref_time(), + normal_max_weight.ref_time().max(1), + ); + let normalized_proof_size = Perbill::from_rational( + normal_block_weight.proof_size(), + normal_max_weight.proof_size().max(1), + ); + + // Pick the limiting dimension. If the proof size is the limiting dimension, then the + // multiplier is adjusted by the proof size. Otherwise, it is adjusted by the ref time. + let (normal_limiting_dimension, max_limiting_dimension) = + if normalized_ref_time < normalized_proof_size { + (normal_block_weight.proof_size(), normal_max_weight.proof_size()) + } else { + (normal_block_weight.ref_time(), normal_max_weight.ref_time()) + }; + + let target_block_fullness = S::get(); + let adjustment_variable = V::get(); + + let target_weight = (target_block_fullness * max_limiting_dimension) as u128; + let block_weight = normal_limiting_dimension as u128; // determines if the first_term is positive let positive = block_weight >= target_weight; @@ -223,12 +243,13 @@ where // defensive only, a test case assures that the maximum weight diff can fit in Multiplier // without any saturation. 
- let diff = Multiplier::saturating_from_rational(diff_abs, normal_max_weight.max(1)); + let diff = Multiplier::saturating_from_rational(diff_abs, max_limiting_dimension.max(1)); let diff_squared = diff.saturating_mul(diff); - let v_squared_2 = v.saturating_mul(v) / Multiplier::saturating_from_integer(2); + let v_squared_2 = adjustment_variable.saturating_mul(adjustment_variable) / + Multiplier::saturating_from_integer(2); - let first_term = v.saturating_mul(diff); + let first_term = adjustment_variable.saturating_mul(diff); let second_term = v_squared_2.saturating_mul(diff_squared); if positive { @@ -290,10 +311,11 @@ const MULTIPLIER_DEFAULT_VALUE: Multiplier = Multiplier::from_u32(1); #[frame_support::pallet] pub mod pallet { - use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + use super::*; + #[pallet::pallet] pub struct Pallet(_); @@ -358,18 +380,20 @@ pub mod pallet { pub(super) type StorageVersion = StorageValue<_, Releases, ValueQuery>; #[pallet::genesis_config] - pub struct GenesisConfig { + pub struct GenesisConfig { pub multiplier: Multiplier, + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, } - impl Default for GenesisConfig { + impl Default for GenesisConfig { fn default() -> Self { - Self { multiplier: MULTIPLIER_DEFAULT_VALUE } + Self { multiplier: MULTIPLIER_DEFAULT_VALUE, _config: Default::default() } } } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { StorageVersion::::put(Releases::V2); NextFeeMultiplier::::put(self.multiplier); @@ -386,12 +410,13 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { - fn on_finalize(_: T::BlockNumber) { + fn on_finalize(_: frame_system::pallet_prelude::BlockNumberFor) { >::mutate(|fm| { *fm = T::FeeMultiplierUpdate::convert(*fm); }); } + #[cfg(feature = "std")] fn integrity_test() { // given weight == u64, we build multipliers from `diff` of two weight values, which can // 
at most be maximum block weight. Make sure that this can fit in a multiplier without @@ -417,33 +442,26 @@ pub mod pallet { return } - #[cfg(any(feature = "std", test))] - sp_io::TestExternalities::new_empty().execute_with(|| { - // This is the minimum value of the multiplier. Make sure that if we collapse to - // this value, we can recover with a reasonable amount of traffic. For this test we - // assert that if we collapse to minimum, the trend will be positive with a weight - // value which is 1% more than the target. - let min_value = T::FeeMultiplierUpdate::min(); - - let target = target + addition; - - >::set_block_consumed_resources(target, 0); - let next = T::FeeMultiplierUpdate::convert(min_value); - assert!( - next > min_value, - "The minimum bound of the multiplier is too low. When \ - block saturation is more than target by 1% and multiplier is minimal then \ - the multiplier doesn't increase." - ); - }); + // This is the minimum value of the multiplier. Make sure that if we collapse to this + // value, we can recover with a reasonable amount of traffic. For this test we assert + // that if we collapse to minimum, the trend will be positive with a weight value which + // is 1% more than the target. + let min_value = T::FeeMultiplierUpdate::min(); + let target = target + addition; + + >::set_block_consumed_resources(target, 0); + let next = T::FeeMultiplierUpdate::convert(min_value); + assert!( + next > min_value, + "The minimum bound of the multiplier is too low. When \ + block saturation is more than target by 1% and multiplier is minimal then \ + the multiplier doesn't increase." + ); } } } -impl Pallet -where - BalanceOf: FixedPointOperand, -{ +impl Pallet { /// Query the data that we know about the fee of a given `call`. 
/// /// This pallet is not and cannot be aware of the internals of a signed extension, for example @@ -628,7 +646,6 @@ where impl Convert> for Pallet where T: Config, - BalanceOf: FixedPointOperand, { /// Compute the fee for the specified weight. /// @@ -657,7 +674,7 @@ pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); impl ChargeTransactionPayment where T::RuntimeCall: Dispatchable, - BalanceOf: Send + Sync + FixedPointOperand, + BalanceOf: Send + Sync, { /// utility constructor. Used only in client/factory code. pub fn from(fee: BalanceOf) -> Self { @@ -710,19 +727,20 @@ where tip: BalanceOf, final_fee: BalanceOf, ) -> TransactionPriority { - // Calculate how many such extrinsics we could fit into an empty block and take - // the limitting factor. + // Calculate how many such extrinsics we could fit into an empty block and take the + // limiting factor. let max_block_weight = T::BlockWeights::get().max_block; let max_block_length = *T::BlockLength::get().max.get(info.class) as u64; - // TODO: Take into account all dimensions of weight - let max_block_weight = max_block_weight.ref_time(); - let info_weight = info.weight.ref_time(); - - let bounded_weight = info_weight.clamp(1, max_block_weight); + // bounded_weight is used as a divisor later so we keep it non-zero. + let bounded_weight = info.weight.max(Weight::from_parts(1, 1)).min(max_block_weight); let bounded_length = (len as u64).clamp(1, max_block_length); - let max_tx_per_block_weight = max_block_weight / bounded_weight; + // returns the scarce resource, i.e. the one that is limiting the number of transactions. + let max_tx_per_block_weight = max_block_weight + .checked_div_per_component(&bounded_weight) + .defensive_proof("bounded_weight is non-zero; qed") + .unwrap_or(1); let max_tx_per_block_length = max_block_length / bounded_length; // Given our current knowledge this value is going to be in a reasonable range - i.e. 
// less than 10^9 (2^30), so multiplying by the `tip` value is unlikely to overflow the @@ -778,7 +796,7 @@ impl sp_std::fmt::Debug for ChargeTransactionPayment { impl SignedExtension for ChargeTransactionPayment where - BalanceOf: Send + Sync + From + FixedPointOperand, + BalanceOf: Send + Sync + From, T::RuntimeCall: Dispatchable, { const IDENTIFIER: &'static str = "ChargeTransactionPayment"; @@ -844,7 +862,6 @@ where impl EstimateCallFee> for Pallet where - BalanceOf: FixedPointOperand, T::RuntimeCall: Dispatchable, { fn estimate_call_fee(call: &AnyCall, post_info: PostDispatchInfo) -> BalanceOf { diff --git a/frame/transaction-payment/src/mock.rs b/frame/transaction-payment/src/mock.rs index 741f094481c38..97253be463058 100644 --- a/frame/transaction-payment/src/mock.rs +++ b/frame/transaction-payment/src/mock.rs @@ -19,10 +19,7 @@ use super::*; use crate as pallet_transaction_payment; use sp_core::H256; -use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, -}; +use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; use frame_support::{ dispatch::DispatchClass, @@ -33,16 +30,12 @@ use frame_support::{ use frame_system as system; use pallet_balances::Call as BalancesCall; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub struct Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub struct Runtime { - System: system::{Pallet, Call, Config, Storage, Event}, + System: system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, } @@ -82,14 +75,13 @@ impl frame_system::Config for Runtime { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = 
RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -115,7 +107,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } diff --git a/frame/transaction-payment/src/tests.rs b/frame/transaction-payment/src/tests.rs index d5109609e2975..d3a1721ccb990 100644 --- a/frame/transaction-payment/src/tests.rs +++ b/frame/transaction-payment/src/tests.rs @@ -20,12 +20,14 @@ use crate as pallet_transaction_payment; use codec::Encode; -use sp_runtime::{testing::TestXt, traits::One, transaction_validity::InvalidTransaction}; +use sp_runtime::{ + testing::TestXt, traits::One, transaction_validity::InvalidTransaction, BuildStorage, +}; use frame_support::{ assert_noop, assert_ok, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo}, - traits::{Currency, GenesisBuild}, + traits::Currency, weights::Weight, }; use frame_system as system; @@ -80,7 +82,7 @@ impl ExtBuilder { } pub fn build(self) -> sp_io::TestExternalities { self.set_constants(); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: if self.balance_factor > 0 { vec![ @@ -99,8 +101,9 @@ impl ExtBuilder { .unwrap(); if let Some(multiplier) = self.initial_multiplier { - let genesis = pallet::GenesisConfig { multiplier }; - GenesisBuild::::assimilate_storage(&genesis, &mut t).unwrap(); + pallet::GenesisConfig:: { multiplier, ..Default::default() } + .assimilate_storage(&mut t) + .unwrap(); } t.into() diff --git a/frame/transaction-storage/Cargo.toml b/frame/transaction-storage/Cargo.toml index 
aa19ce3fe2840..ae94e4a9bf013 100644 --- a/frame/transaction-storage/Cargo.toml +++ b/frame/transaction-storage/Cargo.toml @@ -13,28 +13,35 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = { version = "4.1", optional = true } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +array-bytes = { version = "6.1", optional = true } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", optional = true } +serde = { version = "1.0.163", optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../balances" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } sp-transaction-storage-proof = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-storage-proof" } log = { version = "0.4.17", default-features = false } [dev-dependencies] -sp-core = { version = "7.0.0", default-features = false, path = 
"../../primitives/core" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } sp-transaction-storage-proof = { version = "4.0.0-dev", default-features = true, path = "../../primitives/transaction-storage-proof" } [features] default = ["std"] -runtime-benchmarks = ["array-bytes", "frame-benchmarking/runtime-benchmarks"] +runtime-benchmarks = [ + "array-bytes", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] std = [ "log/std", "frame-benchmarking?/std", @@ -49,5 +56,11 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-transaction-storage-proof/std", + "sp-core/std" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/transaction-storage/src/benchmarking.rs b/frame/transaction-storage/src/benchmarking.rs index dfea3331569f9..fdbaeb1f95181 100644 --- a/frame/transaction-storage/src/benchmarking.rs +++ b/frame/transaction-storage/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_benchmarking::v1::{benchmarks, whitelisted_caller}; use frame_support::traits::{Currency, Get, OnFinalize, OnInitialize}; -use frame_system::{EventRecord, Pallet as System, RawOrigin}; +use frame_system::{pallet_prelude::BlockNumberFor, EventRecord, Pallet as System, RawOrigin}; use sp_runtime::traits::{Bounded, One, Zero}; use sp_std::*; use sp_transaction_storage_proof::TransactionStorageProof; @@ -113,7 +113,7 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { assert_eq!(event, &system_event); } -pub fn run_to_block(n: T::BlockNumber) { +pub fn run_to_block(n: frame_system::pallet_prelude::BlockNumberFor) { while frame_system::Pallet::::block_number() < n { crate::Pallet::::on_finalize(frame_system::Pallet::::block_number()); 
frame_system::Pallet::::on_finalize(frame_system::Pallet::::block_number()); @@ -144,7 +144,7 @@ benchmarks! { vec![0u8; T::MaxTransactionSize::get() as usize], )?; run_to_block::(1u32.into()); - }: _(RawOrigin::Signed(caller.clone()), T::BlockNumber::zero(), 0) + }: _(RawOrigin::Signed(caller.clone()), BlockNumberFor::::zero(), 0) verify { assert_last_event::(Event::Renewed { index: 0 }.into()); } @@ -159,7 +159,7 @@ benchmarks! { vec![0u8; T::MaxTransactionSize::get() as usize], )?; } - run_to_block::(StoragePeriod::::get() + T::BlockNumber::one()); + run_to_block::(StoragePeriod::::get() + BlockNumberFor::::one()); let encoded_proof = proof(); let proof = TransactionStorageProof::decode(&mut &*encoded_proof).unwrap(); }: check_proof(RawOrigin::None, proof) diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs index b99bc49fc5b6f..e784d20a0cfd7 100644 --- a/frame/transaction-storage/src/lib.rs +++ b/frame/transaction-storage/src/lib.rs @@ -145,7 +145,7 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { - fn on_initialize(n: T::BlockNumber) -> Weight { + fn on_initialize(n: BlockNumberFor) -> Weight { // Drop obsolete roots. The proof for `obsolete` will be checked later // in this block, so we drop `obsolete` - 1. let period = >::get(); @@ -158,7 +158,7 @@ pub mod pallet { T::DbWeight::get().reads_writes(2, 4) } - fn on_finalize(n: T::BlockNumber) { + fn on_finalize(n: BlockNumberFor) { assert!( >::take() || { // Proof is not required for early or empty blocks. 
@@ -238,7 +238,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::renew())] pub fn renew( origin: OriginFor, - block: T::BlockNumber, + block: BlockNumberFor, index: u32, ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; @@ -342,7 +342,7 @@ pub mod pallet { pub(super) type Transactions = StorageMap< _, Blake2_128Concat, - T::BlockNumber, + BlockNumberFor, BoundedVec, OptionQuery, >; @@ -350,7 +350,7 @@ pub mod pallet { /// Count indexed chunks for each block. #[pallet::storage] pub(super) type ChunkCount = - StorageMap<_, Blake2_128Concat, T::BlockNumber, u32, ValueQuery>; + StorageMap<_, Blake2_128Concat, BlockNumberFor, u32, ValueQuery>; #[pallet::storage] #[pallet::getter(fn byte_fee)] @@ -365,7 +365,7 @@ pub mod pallet { /// Storage period for data in blocks. Should match `sp_storage_proof::DEFAULT_STORAGE_PERIOD` /// for block authoring. #[pallet::storage] - pub(super) type StoragePeriod = StorageValue<_, T::BlockNumber, ValueQuery>; + pub(super) type StoragePeriod = StorageValue<_, BlockNumberFor, ValueQuery>; // Intermediates #[pallet::storage] @@ -380,7 +380,7 @@ pub mod pallet { pub struct GenesisConfig { pub byte_fee: BalanceOf, pub entry_fee: BalanceOf, - pub storage_period: T::BlockNumber, + pub storage_period: BlockNumberFor, } impl Default for GenesisConfig { @@ -394,7 +394,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { >::put(&self.byte_fee); >::put(&self.entry_fee); diff --git a/frame/transaction-storage/src/mock.rs b/frame/transaction-storage/src/mock.rs index 3a87d8eaea707..243e26b559053 100644 --- a/frame/transaction-storage/src/mock.rs +++ b/frame/transaction-storage/src/mock.rs @@ -24,22 +24,17 @@ use crate::{ use frame_support::traits::{ConstU16, ConstU32, ConstU64, OnFinalize, OnInitialize}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; -type 
UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; pub type Block = frame_system::mocking::MockBlock; // Configure a mock runtime to test the pallet. frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Config, Storage, Event}, TransactionStorage: pallet_transaction_storage::{ Pallet, Call, Storage, Config, Inherent, Event @@ -53,13 +48,12 @@ impl frame_system::Config for Test { type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); @@ -86,7 +80,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -101,7 +95,7 @@ impl pallet_transaction_storage::Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = GenesisConfig { + let t = RuntimeGenesisConfig { system: Default::default(), balances: pallet_balances::GenesisConfig:: { balances: vec![(1, 1000000000), (2, 100), (3, 100), (4, 100)], diff --git a/frame/transaction-storage/src/weights.rs b/frame/transaction-storage/src/weights.rs index 5103ac375e97a..519317177c492 100644 --- a/frame/transaction-storage/src/weights.rs +++ b/frame/transaction-storage/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_transaction_storage //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_transaction_storage +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_transaction_storage. pub trait WeightInfo { @@ -67,10 +71,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `176` // Estimated: `38351` - // Minimum execution time: 36_983_000 picoseconds. - Weight::from_parts(37_296_000, 38351) - // Standard Error: 2 - .saturating_add(Weight::from_parts(4_908, 0).saturating_mul(l.into())) + // Minimum execution time: 34_844_000 picoseconds. + Weight::from_parts(35_489_000, 38351) + // Standard Error: 11 + .saturating_add(Weight::from_parts(6_912, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -86,8 +90,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `40351` - // Minimum execution time: 44_637_000 picoseconds. 
- Weight::from_parts(45_464_000, 40351) + // Minimum execution time: 48_244_000 picoseconds. + Weight::from_parts(50_939_000, 40351) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -105,8 +109,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `37145` // Estimated: `40351` - // Minimum execution time: 59_653_000 picoseconds. - Weight::from_parts(61_068_000, 40351) + // Minimum execution time: 80_913_000 picoseconds. + Weight::from_parts(84_812_000, 40351) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -125,10 +129,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `176` // Estimated: `38351` - // Minimum execution time: 36_983_000 picoseconds. - Weight::from_parts(37_296_000, 38351) - // Standard Error: 2 - .saturating_add(Weight::from_parts(4_908, 0).saturating_mul(l.into())) + // Minimum execution time: 34_844_000 picoseconds. + Weight::from_parts(35_489_000, 38351) + // Standard Error: 11 + .saturating_add(Weight::from_parts(6_912, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -144,8 +148,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `40351` - // Minimum execution time: 44_637_000 picoseconds. - Weight::from_parts(45_464_000, 40351) + // Minimum execution time: 48_244_000 picoseconds. + Weight::from_parts(50_939_000, 40351) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -163,8 +167,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `37145` // Estimated: `40351` - // Minimum execution time: 59_653_000 picoseconds. - Weight::from_parts(61_068_000, 40351) + // Minimum execution time: 80_913_000 picoseconds. 
+ Weight::from_parts(84_812_000, 40351) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index f3c06735f8871..f692f204b9b3c 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -13,23 +13,23 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len", ] } impl-trait-for-tuples = "0.2.2" scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", features = ["derive"], optional = true } +serde = { version = "1.0.163", features = ["derive"], optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../balances" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } pallet-utility = { version = "4.0.0-dev", path = 
"../utility" } [features] @@ -44,10 +44,22 @@ std = [ "serde", "sp-runtime/std", "sp-std/std", + "pallet-utility/std", + "sp-core/std", + "sp-io/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-utility/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index a3761083e4faa..b8a53e06f2092 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -47,8 +47,10 @@ fn setup_proposal, I: 'static>( fn create_approved_proposals, I: 'static>(n: u32) -> Result<(), &'static str> { for i in 0..n { let (caller, value, lookup) = setup_proposal::(i); + #[allow(deprecated)] Treasury::::propose_spend(RawOrigin::Signed(caller).into(), value, lookup)?; let proposal_id = >::get() - 1; + #[allow(deprecated)] Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; } ensure!(>::get().len() == n as usize, "Not all approved"); @@ -93,6 +95,7 @@ benchmarks_instance_pallet! { reject_proposal { let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); + #[allow(deprecated)] Treasury::::propose_spend( RawOrigin::Signed(caller).into(), value, @@ -107,6 +110,7 @@ benchmarks_instance_pallet! { let p in 0 .. T::MaxApprovals::get() - 1; create_approved_proposals::(p)?; let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); + #[allow(deprecated)] Treasury::::propose_spend( RawOrigin::Signed(caller).into(), value, @@ -119,12 +123,14 @@ benchmarks_instance_pallet! 
{ remove_approval { let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); + #[allow(deprecated)] Treasury::::propose_spend( RawOrigin::Signed(caller).into(), value, beneficiary_lookup )?; let proposal_id = Treasury::::proposal_count() - 1; + #[allow(deprecated)] Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; let reject_origin = T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -135,7 +141,7 @@ benchmarks_instance_pallet! { setup_pot_account::(); create_approved_proposals::(p)?; }: { - Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(frame_system::pallet_prelude::BlockNumberFor::::zero()); } impl_benchmark_test_suite!(Treasury, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index 847ff96a7a78b..730fae2a4e92c 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -175,7 +175,7 @@ pub mod pallet { /// Period between successive spends. #[pallet::constant] - type SpendPeriod: Get; + type SpendPeriod: Get>; /// Percentage of spare funds (if any) that are burnt per spend period. #[pallet::constant] @@ -234,25 +234,14 @@ pub mod pallet { StorageValue<_, BoundedVec, ValueQuery>; #[pallet::genesis_config] - #[derive(Default)] - pub struct GenesisConfig; - - #[cfg(feature = "std")] - impl GenesisConfig { - /// Direct implementation of `GenesisBuild::assimilate_storage`. 
- #[deprecated( - note = "use ` as GenesisBuild>::assimilate_storage` instead" - )] - pub fn assimilate_storage, I: 'static>( - &self, - storage: &mut sp_runtime::Storage, - ) -> Result<(), String> { - >::assimilate_storage(self, storage) - } + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig, I: 'static = ()> { + #[serde(skip)] + _config: sp_std::marker::PhantomData<(T, I)>, } #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { + impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) { // Create Treasury account let account_id = >::account_id(); @@ -310,7 +299,7 @@ pub mod pallet { impl, I: 'static> Hooks> for Pallet { /// ## Complexity /// - `O(A)` where `A` is the number of approvals - fn on_initialize(n: T::BlockNumber) -> Weight { + fn on_initialize(n: frame_system::pallet_prelude::BlockNumberFor) -> Weight { let pot = Self::pot(); let deactivated = Deactivated::::get(); if pot != deactivated { @@ -347,6 +336,10 @@ pub mod pallet { /// - O(1) #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::propose_spend())] + #[allow(deprecated)] + #[deprecated( + note = "`propose_spend` will be removed in February 2024. Use `spend` instead." + )] pub fn propose_spend( origin: OriginFor, #[pallet::compact] value: BalanceOf, @@ -375,6 +368,10 @@ pub mod pallet { /// - O(1) #[pallet::call_index(1)] #[pallet::weight((T::WeightInfo::reject_proposal(), DispatchClass::Operational))] + #[allow(deprecated)] + #[deprecated( + note = "`reject_proposal` will be removed in February 2024. Use `spend` instead." + )] pub fn reject_proposal( origin: OriginFor, #[pallet::compact] proposal_id: ProposalIndex, @@ -403,6 +400,10 @@ pub mod pallet { /// - O(1). #[pallet::call_index(2)] #[pallet::weight((T::WeightInfo::approve_proposal(T::MaxApprovals::get()), DispatchClass::Operational))] + #[allow(deprecated)] + #[deprecated( + note = "`approve_proposal` will be removed in February 2024. Use `spend` instead." 
+ )] pub fn approve_proposal( origin: OriginFor, #[pallet::compact] proposal_id: ProposalIndex, diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 67b21ff6252a8..ba45d5f6ff16f 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -21,14 +21,12 @@ use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BadOrigin, BlakeTwo256, Dispatchable, IdentityLookup}, + BuildStorage, }; use frame_support::{ - assert_err_ignore_postinfo, assert_noop, assert_ok, - pallet_prelude::GenesisBuild, - parameter_types, + assert_err_ignore_postinfo, assert_noop, assert_ok, parameter_types, traits::{ConstU32, ConstU64, OnInitialize}, PalletId, }; @@ -36,20 +34,16 @@ use frame_support::{ use super::*; use crate as treasury; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; type UtilityCall = pallet_utility::Call; type TreasuryCall = crate::Call; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - Treasury: treasury::{Pallet, Call, Storage, Config, Event}, + Treasury: treasury::{Pallet, Call, Storage, Config, Event}, Utility: pallet_utility, } ); @@ -60,14 +54,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = 
(); @@ -92,7 +85,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -147,14 +140,14 @@ impl Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized at ED. balances: vec![(0, 100), (1, 98), (2, 1)], } .assimilate_storage(&mut t) .unwrap(); - GenesisBuild::::assimilate_storage(&crate::GenesisConfig, &mut t).unwrap(); + crate::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); t.into() } @@ -223,7 +216,10 @@ fn minting_works() { #[test] fn spend_proposal_takes_min_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) + }); assert_eq!(Balances::free_balance(0), 99); assert_eq!(Balances::reserved_balance(0), 1); }); @@ -232,7 +228,10 @@ fn spend_proposal_takes_min_deposit() { #[test] fn spend_proposal_takes_proportional_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); assert_eq!(Balances::free_balance(0), 95); assert_eq!(Balances::reserved_balance(0), 5); }); @@ -242,7 +241,10 @@ fn spend_proposal_takes_proportional_deposit() { fn spend_proposal_fails_when_proposer_poor() { new_test_ext().execute_with(|| { assert_noop!( - Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3), + { + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3) + }, Error::::InsufficientProposersBalance, ); 
}); @@ -253,8 +255,14 @@ fn accepted_spend_proposal_ignored_outside_spend_period() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }); >::on_initialize(1); assert_eq!(Balances::free_balance(3), 0); @@ -280,8 +288,14 @@ fn rejected_spend_proposal_ignored_on_spend_period() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::reject_proposal(RuntimeOrigin::root(), 0) + }); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 0); @@ -294,10 +308,19 @@ fn reject_already_rejected_spend_proposal_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::reject_proposal(RuntimeOrigin::root(), 0) + }); assert_noop!( - Treasury::reject_proposal(RuntimeOrigin::root(), 0), + { + #[allow(deprecated)] + Treasury::reject_proposal(RuntimeOrigin::root(), 0) + }, Error::::InvalidIndex ); }); @@ -307,7 +330,10 @@ fn reject_already_rejected_spend_proposal_fails() { fn reject_non_existent_spend_proposal_fails() { 
new_test_ext().execute_with(|| { assert_noop!( - Treasury::reject_proposal(RuntimeOrigin::root(), 0), + { + #[allow(deprecated)] + Treasury::reject_proposal(RuntimeOrigin::root(), 0) + }, Error::::InvalidIndex ); }); @@ -317,7 +343,10 @@ fn reject_non_existent_spend_proposal_fails() { fn accept_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { assert_noop!( - Treasury::approve_proposal(RuntimeOrigin::root(), 0), + { + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }, Error::::InvalidIndex ); }); @@ -328,10 +357,19 @@ fn accept_already_rejected_spend_proposal_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::reject_proposal(RuntimeOrigin::root(), 0) + }); assert_noop!( - Treasury::approve_proposal(RuntimeOrigin::root(), 0), + { + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }, Error::::InvalidIndex ); }); @@ -343,8 +381,14 @@ fn accepted_spend_proposal_enacted_on_spend_period() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 100); @@ -358,8 +402,14 @@ fn pot_underflow_should_not_diminish() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - 
assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed @@ -380,14 +430,26 @@ fn treasury_account_doesnt_get_deleted() { assert_eq!(Treasury::pot(), 100); let treasury_balance = Balances::free_balance(&Treasury::account_id()); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 1)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 1) + }); >::on_initialize(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied @@ -399,7 +461,7 @@ fn treasury_account_doesnt_get_deleted() { // This is useful for chain that will just update runtime. 
#[test] fn inexistent_account_works() { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(0, 100), (1, 99), (2, 1)] } .assimilate_storage(&mut t) .unwrap(); @@ -410,10 +472,22 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist assert_eq!(Treasury::pot(), 0); // Pot is empty - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 1)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 1) + }); >::on_initialize(2); assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed @@ -431,7 +505,7 @@ fn inexistent_account_works() { #[test] fn genesis_funding_works() { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let initial_funding = 100; pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized with 100. 
@@ -439,7 +513,7 @@ fn genesis_funding_works() { } .assimilate_storage(&mut t) .unwrap(); - GenesisBuild::::assimilate_storage(&crate::GenesisConfig, &mut t).unwrap(); + crate::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); let mut t: sp_io::TestExternalities = t.into(); t.execute_with(|| { @@ -455,14 +529,26 @@ fn max_approvals_limited() { Balances::make_free_balance_be(&0, u64::MAX); for _ in 0..::MaxApprovals::get() { - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }); } // One too many will fail - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); assert_noop!( - Treasury::approve_proposal(RuntimeOrigin::root(), 0), + { + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }, Error::::TooManyApprovals ); }); @@ -473,8 +559,14 @@ fn remove_already_removed_approval_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!({ + #[allow(deprecated)] + Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) + }); + assert_ok!({ + #[allow(deprecated)] + Treasury::approve_proposal(RuntimeOrigin::root(), 0) + }); assert_eq!(Treasury::approvals(), vec![0]); assert_ok!(Treasury::remove_approval(RuntimeOrigin::root(), 0)); assert_eq!(Treasury::approvals(), vec![]); diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs index edf1a674f73ff..8f1418f76d969 100644 --- a/frame/treasury/src/weights.rs +++ 
b/frame/treasury/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_treasury //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_treasury +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_treasury. pub trait WeightInfo { @@ -67,10 +71,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) fn spend() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `76` // Estimated: `1887` - // Minimum execution time: 16_592_000 picoseconds. - Weight::from_parts(16_959_000, 1887) + // Minimum execution time: 15_057_000 picoseconds. 
+ Weight::from_parts(15_803_000, 1887) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -80,10 +84,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) fn propose_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `143` + // Measured: `177` // Estimated: `1489` - // Minimum execution time: 29_742_000 picoseconds. - Weight::from_parts(30_359_000, 1489) + // Minimum execution time: 28_923_000 picoseconds. + Weight::from_parts(29_495_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -93,10 +97,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn reject_proposal() -> Weight { // Proof Size summary in bytes: - // Measured: `301` + // Measured: `335` // Estimated: `3593` - // Minimum execution time: 31_248_000 picoseconds. - Weight::from_parts(31_882_000, 3593) + // Minimum execution time: 30_539_000 picoseconds. + Weight::from_parts(30_986_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -107,12 +111,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[0, 99]`. fn approve_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `470 + p * (8 ±0)` + // Measured: `504 + p * (8 ±0)` // Estimated: `3573` - // Minimum execution time: 10_441_000 picoseconds. - Weight::from_parts(13_061_079, 3573) - // Standard Error: 877 - .saturating_add(Weight::from_parts(26_940, 0).saturating_mul(p.into())) + // Minimum execution time: 9_320_000 picoseconds. 
+ Weight::from_parts(12_606_599, 3573) + // Standard Error: 1_302 + .saturating_add(Weight::from_parts(71_054, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -120,10 +124,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `127` + // Measured: `161` // Estimated: `1887` - // Minimum execution time: 7_935_000 picoseconds. - Weight::from_parts(8_153_000, 1887) + // Minimum execution time: 7_231_000 picoseconds. + Weight::from_parts(7_459_000, 1887) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -140,12 +144,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[0, 100]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `387 + p * (251 ±0)` + // Measured: `421 + p * (251 ±0)` // Estimated: `1887 + p * (5206 ±0)` - // Minimum execution time: 45_306_000 picoseconds. - Weight::from_parts(53_639_830, 1887) - // Standard Error: 32_330 - .saturating_add(Weight::from_parts(38_930_307, 0).saturating_mul(p.into())) + // Minimum execution time: 44_769_000 picoseconds. 
+ Weight::from_parts(57_915_572, 1887) + // Standard Error: 59_484 + .saturating_add(Weight::from_parts(42_343_732, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -164,10 +168,10 @@ impl WeightInfo for () { /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) fn spend() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `76` // Estimated: `1887` - // Minimum execution time: 16_592_000 picoseconds. - Weight::from_parts(16_959_000, 1887) + // Minimum execution time: 15_057_000 picoseconds. + Weight::from_parts(15_803_000, 1887) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -177,10 +181,10 @@ impl WeightInfo for () { /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) fn propose_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `143` + // Measured: `177` // Estimated: `1489` - // Minimum execution time: 29_742_000 picoseconds. - Weight::from_parts(30_359_000, 1489) + // Minimum execution time: 28_923_000 picoseconds. + Weight::from_parts(29_495_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -190,10 +194,10 @@ impl WeightInfo for () { /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn reject_proposal() -> Weight { // Proof Size summary in bytes: - // Measured: `301` + // Measured: `335` // Estimated: `3593` - // Minimum execution time: 31_248_000 picoseconds. - Weight::from_parts(31_882_000, 3593) + // Minimum execution time: 30_539_000 picoseconds. 
+ Weight::from_parts(30_986_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -204,12 +208,12 @@ impl WeightInfo for () { /// The range of component `p` is `[0, 99]`. fn approve_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `470 + p * (8 ±0)` + // Measured: `504 + p * (8 ±0)` // Estimated: `3573` - // Minimum execution time: 10_441_000 picoseconds. - Weight::from_parts(13_061_079, 3573) - // Standard Error: 877 - .saturating_add(Weight::from_parts(26_940, 0).saturating_mul(p.into())) + // Minimum execution time: 9_320_000 picoseconds. + Weight::from_parts(12_606_599, 3573) + // Standard Error: 1_302 + .saturating_add(Weight::from_parts(71_054, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -217,10 +221,10 @@ impl WeightInfo for () { /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `127` + // Measured: `161` // Estimated: `1887` - // Minimum execution time: 7_935_000 picoseconds. - Weight::from_parts(8_153_000, 1887) + // Minimum execution time: 7_231_000 picoseconds. + Weight::from_parts(7_459_000, 1887) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -237,12 +241,12 @@ impl WeightInfo for () { /// The range of component `p` is `[0, 100]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `387 + p * (251 ±0)` + // Measured: `421 + p * (251 ±0)` // Estimated: `1887 + p * (5206 ±0)` - // Minimum execution time: 45_306_000 picoseconds. - Weight::from_parts(53_639_830, 1887) - // Standard Error: 32_330 - .saturating_add(Weight::from_parts(38_930_307, 0).saturating_mul(p.into())) + // Minimum execution time: 44_769_000 picoseconds. 
+ Weight::from_parts(57_915_572, 1887) + // Standard Error: 59_484 + .saturating_add(Weight::from_parts(42_343_732, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) diff --git a/frame/try-runtime/Cargo.toml b/frame/try-runtime/Cargo.toml index 042dba5ed88ad..f9b99ba598994 100644 --- a/frame/try-runtime/Cargo.toml +++ b/frame/try-runtime/Cargo.toml @@ -12,11 +12,11 @@ description = "FRAME pallet for democracy" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"]} +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"]} frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [features] default = [ "std" ] @@ -29,4 +29,5 @@ std = [ ] try-runtime = [ "frame-support/try-runtime", + "sp-runtime/try-runtime" ] diff --git a/frame/uniques/Cargo.toml b/frame/uniques/Cargo.toml index f88a862daba48..9ddcef1e7b10b 100644 --- a/frame/uniques/Cargo.toml +++ b/frame/uniques/Cargo.toml @@ -13,20 +13,20 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = 
"0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../primitives/io" } -sp-std = { version = "5.0.0", path = "../../primitives/std" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } +sp-std = { version = "8.0.0", path = "../../primitives/std" } [features] default = ["std"] @@ -39,10 +39,20 @@ std = [ "scale-info/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "sp-core/std", + "sp-io/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "pallet-balances/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/uniques/src/functions.rs b/frame/uniques/src/functions.rs index 681ad06a84b83..1977c23f67e5e 100644 --- a/frame/uniques/src/functions.rs +++ 
b/frame/uniques/src/functions.rs @@ -25,6 +25,15 @@ use frame_support::{ use sp_runtime::{DispatchError, DispatchResult}; impl, I: 'static> Pallet { + /// Perform a transfer of an item from one account to another within a collection. + /// + /// # Errors + /// This function returns a dispatch error in the following cases: + /// - The collection or item does not exist + /// ([`UnknownCollection`](crate::Error::UnknownCollection)). + /// - The collection is frozen, and no transfers are allowed ([`Frozen`](crate::Error::Frozen)). + /// - The item is locked, and transfers are not permitted ([`Locked`](crate::Error::Locked)). + /// - The `with_details` closure returns an error. pub fn do_transfer( collection: T::CollectionId, item: T::ItemId, @@ -49,8 +58,8 @@ impl, I: 'static> Pallet { let origin = details.owner; details.owner = dest; - // The approved account has to be reset to None, because otherwise pre-approve attack would - // be possible, where the owner can approve their second account before making the + // The approved account has to be reset to `None`, because otherwise pre-approve attack + // would be possible, where the owner can approve their second account before making the // transaction and then claiming the item back. details.approved = None; @@ -66,6 +75,12 @@ impl, I: 'static> Pallet { Ok(()) } + /// Create a new collection with the provided details. + /// + /// # Errors + /// This function returns a dispatch error in the following cases: + /// - If the collection ID is already in use ([`InUse`](crate::Error::InUse)). + /// - If reserving the deposit fails (e.g., insufficient funds). pub fn do_create_collection( collection: T::CollectionId, owner: T::AccountId, @@ -99,6 +114,15 @@ impl, I: 'static> Pallet { Ok(()) } + /// Destroy a collection along with its associated items and metadata. 
+ /// + /// # Errors + /// This function returns a dispatch error in the following cases: + /// - The collection does not exist ([`UnknownCollection`](crate::Error::UnknownCollection)). + /// - The provided witness does not match the actual counts + /// ([`BadWitness`](crate::Error::BadWitness)). + /// - The caller is not the owner of the collection + /// ([`NoPermission`](crate::Error::NoPermission)). pub fn do_destroy_collection( collection: T::CollectionId, witness: DestroyWitness, @@ -141,6 +165,18 @@ impl, I: 'static> Pallet { }) } + /// Mint (create) a new item within a collection and assign ownership to an account. + /// + /// # Errors + /// This function returns a dispatch error in the following cases: + /// - The item already exists in the collection + /// ([`AlreadyExists`](crate::Error::AlreadyExists)). + /// - The collection does not exist ([`UnknownCollection`](crate::Error::UnknownCollection)). + /// - The provided closure `with_details` returns an error. + /// - The collection has reached its maximum supply + /// ([`MaxSupplyReached`](crate::Error::MaxSupplyReached)). + /// - An arithmetic overflow occurs when incrementing the number of items in the collection. + /// - The currency reserve operation for the item deposit fails for any reason. pub fn do_mint( collection: T::CollectionId, item: T::ItemId, @@ -187,6 +223,14 @@ impl, I: 'static> Pallet { Ok(()) } + /// Burn (destroy) an item from a collection. + /// + /// # Errors + /// This function returns a `Dispatch` error in the following cases: + /// - The item is locked and burns are not permitted ([`Locked`](crate::Error::Locked)). + /// - The collection or item does not exist + /// ([`UnknownCollection`](crate::Error::UnknownCollection)). + /// - The `with_details` closure returns an error. pub fn do_burn( collection: T::CollectionId, item: T::ItemId, @@ -218,6 +262,13 @@ impl, I: 'static> Pallet { Ok(()) } + /// Set or remove the price for an item in a collection. 
+ /// + /// # Errors + /// This function returns a dispatch error in the following cases: + /// - The item or collection does not exist ([`UnknownItem`](crate::Error::UnknownItem) or + /// [`UnknownCollection`](crate::Error::UnknownCollection)). + /// - The sender is not the owner of the item ([`NoPermission`](crate::Error::NoPermission)). pub fn do_set_price( collection: T::CollectionId, item: T::ItemId, @@ -244,6 +295,19 @@ impl, I: 'static> Pallet { Ok(()) } + /// Buy an item from a collection. + /// + /// # Errors + /// This function returns a dispatch error in the following cases: + /// - The item or collection does not exist ([`UnknownItem`](crate::Error::UnknownItem) or + /// [`UnknownCollection`](crate::Error::UnknownCollection)). + /// - The buyer is the current owner of the item ([`NoPermission`](crate::Error::NoPermission)). + /// - The item is not for sale ([`NotForSale`](crate::Error::NotForSale)). + /// - The bid price is lower than the item's sale price + /// ([`BidTooLow`](crate::Error::BidTooLow)). + /// - The item is set to be sold only to a specific buyer, and the provided buyer is not the + /// whitelisted buyer ([`NoPermission`](crate::Error::NoPermission)). + /// - The currency transfer between the buyer and the owner fails for any reason. 
pub fn do_buy_item( collection: T::CollectionId, item: T::ItemId, diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index 72ec02cf1f633..1b75d0b078ba5 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -43,11 +43,8 @@ pub mod migration; pub mod weights; use codec::{Decode, Encode}; -use frame_support::{ - traits::{ - tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, ReservableCurrency, - }, - transactional, +use frame_support::traits::{ + tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, ReservableCurrency, }; use frame_system::Config as SystemConfig; use sp_runtime::{ @@ -60,8 +57,10 @@ pub use pallet::*; pub use types::*; pub use weights::WeightInfo; +/// The log target for this pallet. const LOG_TARGET: &str = "runtime::uniques"; +/// A type alias for the account ID type used in the dispatchable functions of this pallet. type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; #[frame_support::pallet] @@ -1523,7 +1522,6 @@ pub mod pallet { /// Emits `ItemBought` on success. 
#[pallet::call_index(25)] #[pallet::weight(T::WeightInfo::buy_item())] - #[transactional] pub fn buy_item( origin: OriginFor, collection: T::CollectionId, diff --git a/frame/uniques/src/mock.rs b/frame/uniques/src/mock.rs index bad393a489582..5c44a7ed7a539 100644 --- a/frame/uniques/src/mock.rs +++ b/frame/uniques/src/mock.rs @@ -26,20 +26,16 @@ use frame_support::{ }; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Uniques: pallet_uniques::{Pallet, Call, Storage, Event}, } @@ -51,13 +47,12 @@ impl frame_system::Config for Test { type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); @@ -84,7 +79,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -110,7 +105,7 @@ impl Config for Test { } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| 
System::set_block_number(1)); diff --git a/frame/uniques/src/types.rs b/frame/uniques/src/types.rs index 5b13998153e03..a2e804f245f77 100644 --- a/frame/uniques/src/types.rs +++ b/frame/uniques/src/types.rs @@ -24,12 +24,16 @@ use frame_support::{ }; use scale_info::TypeInfo; +/// A type alias for handling balance deposits. pub(super) type DepositBalanceOf = <>::Currency as Currency<::AccountId>>::Balance; +/// A type alias representing the details of a collection. pub(super) type CollectionDetailsFor = CollectionDetails<::AccountId, DepositBalanceOf>; +/// A type alias for the details of a single item. pub(super) type ItemDetailsFor = ItemDetails<::AccountId, DepositBalanceOf>; +/// A type alias to represent the price of an item. pub(super) type ItemPrice = <>::Currency as Currency<::AccountId>>::Balance; diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs index 14a55d163e0ff..eb80ee550a1db 100644 --- a/frame/uniques/src/weights.rs +++ b/frame/uniques/src/weights.rs @@ -15,38 +15,39 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_uniques +//! Autogenerated weights for `pallet_uniques` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-07-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-gghbxkbs-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// target/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_uniques // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/uniques/src/weights.rs +// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json +// --pallet=pallet_uniques +// --chain=dev // --header=./HEADER-APACHE2 +// --output=./frame/uniques/src/weights.rs // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; -/// Weight functions needed for pallet_uniques. +/// Weight functions needed for `pallet_uniques`. pub trait WeightInfo { fn create() -> Weight; fn force_create() -> Weight; @@ -76,51 +77,51 @@ pub trait WeightInfo { fn buy_item() -> Weight; } -/// Weights for pallet_uniques using the Substrate node and recommended hardware. +/// Weights for `pallet_uniques` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques ClassAccount (r:0 w:1) - /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `249` // Estimated: `3643` - // Minimum execution time: 32_067_000 picoseconds. - Weight::from_parts(32_817_000, 3643) + // Minimum execution time: 31_393_000 picoseconds. + Weight::from_parts(32_933_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques ClassAccount (r:0 w:1) - /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3643` - // Minimum execution time: 16_365_000 picoseconds. - Weight::from_parts(16_707_000, 3643) + // Minimum execution time: 14_827_000 picoseconds. 
+ Weight::from_parts(15_273_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Asset (r:1001 w:1000) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques InstanceMetadataOf (r:1000 w:1000) - /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Uniques Attribute (r:1000 w:1000) - /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) - /// Storage: Uniques ClassAccount (r:0 w:1) - /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) - /// Storage: Uniques ClassMetadataOf (r:0 w:1) - /// Proof: Uniques ClassMetadataOf (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Uniques Account (r:0 w:1000) - /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Uniques CollectionMaxSupply (r:0 w:1) - /// Proof: Uniques CollectionMaxSupply (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1001 w:1000) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1000 w:1000) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Attribute` (r:1000 w:1000) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: 
Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassMetadataOf` (r:0 w:1) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:1000) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::CollectionMaxSupply` (r:0 w:1) + /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. @@ -128,14 +129,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `418 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` // Estimated: `3643 + a * (2839 ±0) + m * (2583 ±0) + n * (2597 ±0)` - // Minimum execution time: 2_498_918_000 picoseconds. - Weight::from_parts(2_516_809_000, 3643) - // Standard Error: 26_297 - .saturating_add(Weight::from_parts(6_648_035, 0).saturating_mul(n.into())) - // Standard Error: 26_297 - .saturating_add(Weight::from_parts(354_268, 0).saturating_mul(m.into())) - // Standard Error: 26_297 - .saturating_add(Weight::from_parts(223_770, 0).saturating_mul(a.into())) + // Minimum execution time: 3_281_673_000 picoseconds. 
+ Weight::from_parts(3_443_387_000, 3643) + // Standard Error: 41_937 + .saturating_add(Weight::from_parts(7_914_842, 0).saturating_mul(n.into())) + // Standard Error: 41_937 + .saturating_add(Weight::from_parts(519_960, 0).saturating_mul(m.into())) + // Standard Error: 41_937 + .saturating_add(Weight::from_parts(462_690, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) @@ -148,371 +149,371 @@ impl WeightInfo for SubstrateWeight { .saturating_add(Weight::from_parts(0, 2583).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) } - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques CollectionMaxSupply (r:1 w:0) - /// Proof: Uniques CollectionMaxSupply (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: Uniques Account (r:0 w:1) - /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:0) + /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:1) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn mint() -> 
Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 38_157_000 picoseconds. - Weight::from_parts(38_677_000, 3643) + // Minimum execution time: 38_122_000 picoseconds. + Weight::from_parts(38_924_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques Account (r:0 w:1) - /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Uniques ItemPriceOf (r:0 w:1) - /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:1) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 39_069_000 picoseconds. - Weight::from_parts(40_442_000, 3643) + // Minimum execution time: 38_835_000 picoseconds. 
+ Weight::from_parts(39_754_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques Account (r:0 w:2) - /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Uniques ItemPriceOf (r:0 w:1) - /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:2) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 28_085_000 picoseconds. - Weight::from_parts(28_403_000, 3643) + // Minimum execution time: 27_032_000 picoseconds. 
+ Weight::from_parts(27_793_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Asset (r:5000 w:5000) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:5000 w:5000) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `805 + i * (76 ±0)` // Estimated: `3643 + i * (2597 ±0)` - // Minimum execution time: 16_202_000 picoseconds. - Weight::from_parts(16_380_000, 3643) - // Standard Error: 18_639 - .saturating_add(Weight::from_parts(16_047_161, 0).saturating_mul(i.into())) + // Minimum execution time: 14_737_000 picoseconds. 
+ Weight::from_parts(15_070_000, 3643) + // Standard Error: 22_500 + .saturating_add(Weight::from_parts(18_855_468, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 2597).saturating_mul(i.into())) } - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 20_131_000 picoseconds. - Weight::from_parts(20_535_000, 3643) + // Minimum execution time: 18_664_000 picoseconds. 
+ Weight::from_parts(19_455_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn thaw() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 19_895_000 picoseconds. - Weight::from_parts(20_198_000, 3643) + // Minimum execution time: 18_247_000 picoseconds. + Weight::from_parts(18_763_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn freeze_collection() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_312_000 picoseconds. - Weight::from_parts(15_555_000, 3643) + // Minimum execution time: 13_219_000 picoseconds. 
+ Weight::from_parts(13_923_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn thaw_collection() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_145_000 picoseconds. - Weight::from_parts(15_371_000, 3643) + // Minimum execution time: 13_376_000 picoseconds. + Weight::from_parts(13_904_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Uniques OwnershipAcceptance (r:1 w:1) - /// Proof: Uniques OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques ClassAccount (r:0 w:2) - /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:2) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `423` // Estimated: `3643` - // Minimum execution time: 23_800_000 picoseconds. 
- Weight::from_parts(23_991_000, 3643) + // Minimum execution time: 22_353_000 picoseconds. + Weight::from_parts(23_222_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_929_000 picoseconds. - Weight::from_parts(16_219_000, 3643) + // Minimum execution time: 14_072_000 picoseconds. + Weight::from_parts(14_619_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques ClassAccount (r:0 w:1) - /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_item_status() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 18_617_000 picoseconds. - Weight::from_parts(19_016_000, 3643) + // Minimum execution time: 17_081_000 picoseconds. 
+ Weight::from_parts(17_698_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques InstanceMetadataOf (r:1 w:0) - /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Uniques Attribute (r:1 w:1) - /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Attribute` (r:1 w:1) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: // Measured: `547` // Estimated: `3829` - // Minimum execution time: 41_982_000 picoseconds. - Weight::from_parts(42_329_000, 3829) + // Minimum execution time: 41_501_000 picoseconds. 
+ Weight::from_parts(43_101_000, 3829) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques InstanceMetadataOf (r:1 w:0) - /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Uniques Attribute (r:1 w:1) - /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Attribute` (r:1 w:1) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: // Measured: `936` // Estimated: `3829` - // Minimum execution time: 39_921_000 picoseconds. - Weight::from_parts(40_499_000, 3829) + // Minimum execution time: 39_722_000 picoseconds. 
+ Weight::from_parts(40_390_000, 3829) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques InstanceMetadataOf (r:1 w:1) - /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `415` // Estimated: `3643` - // Minimum execution time: 31_774_000 picoseconds. - Weight::from_parts(32_327_000, 3643) + // Minimum execution time: 30_726_000 picoseconds. + Weight::from_parts(31_557_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques InstanceMetadataOf (r:1 w:1) - /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `547` // Estimated: `3643` - // Minimum execution time: 32_551_000 picoseconds. 
- Weight::from_parts(32_891_000, 3643) + // Minimum execution time: 31_303_000 picoseconds. + Weight::from_parts(32_389_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques ClassMetadataOf (r:1 w:1) - /// Proof: Uniques ClassMetadataOf (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 33_490_000 picoseconds. - Weight::from_parts(34_617_000, 3643) + // Minimum execution time: 32_155_000 picoseconds. 
+ Weight::from_parts(32_885_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques ClassMetadataOf (r:1 w:1) - /// Proof: Uniques ClassMetadataOf (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `461` // Estimated: `3643` - // Minimum execution time: 31_691_000 picoseconds. - Weight::from_parts(32_042_000, 3643) + // Minimum execution time: 30_044_000 picoseconds. + Weight::from_parts(31_405_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 20_738_000 picoseconds. - Weight::from_parts(21_067_000, 3643) + // Minimum execution time: 18_904_000 picoseconds. 
+ Weight::from_parts(19_687_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `528` // Estimated: `3643` - // Minimum execution time: 20_404_000 picoseconds. - Weight::from_parts(20_999_000, 3643) + // Minimum execution time: 19_144_000 picoseconds. + Weight::from_parts(19_706_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Uniques OwnershipAcceptance (r:1 w:1) - /// Proof: Uniques OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3517` - // Minimum execution time: 17_047_000 picoseconds. - Weight::from_parts(17_307_000, 3517) + // Minimum execution time: 15_339_000 picoseconds. 
+ Weight::from_parts(15_918_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Uniques CollectionMaxSupply (r:1 w:1) - /// Proof: Uniques CollectionMaxSupply (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) + /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:1) + /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 17_829_000 picoseconds. - Weight::from_parts(18_194_000, 3643) + // Minimum execution time: 15_387_000 picoseconds. + Weight::from_parts(15_726_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Uniques Asset (r:1 w:0) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques ItemPriceOf (r:0 w:1) - /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Uniques::Asset` (r:1 w:0) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn set_price() -> Weight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3587` - // Minimum execution time: 17_620_000 picoseconds. 
- Weight::from_parts(17_931_000, 3587) + // Minimum execution time: 15_873_000 picoseconds. + Weight::from_parts(16_860_000, 3587) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques ItemPriceOf (r:1 w:1) - /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Account (r:0 w:2) - /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:1 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:2) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn buy_item() -> Weight { // Proof Size summary in bytes: // Measured: `607` // Estimated: `3643` - // Minimum execution time: 39_550_000 picoseconds. - Weight::from_parts(40_052_000, 3643) + // Minimum execution time: 37_245_000 picoseconds. + Weight::from_parts(38_383_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques ClassAccount (r:0 w:1) - /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `249` // Estimated: `3643` - // Minimum execution time: 32_067_000 picoseconds. - Weight::from_parts(32_817_000, 3643) + // Minimum execution time: 31_393_000 picoseconds. + Weight::from_parts(32_933_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques ClassAccount (r:0 w:1) - /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3643` - // Minimum execution time: 16_365_000 picoseconds. - Weight::from_parts(16_707_000, 3643) + // Minimum execution time: 14_827_000 picoseconds. 
+ Weight::from_parts(15_273_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Asset (r:1001 w:1000) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques InstanceMetadataOf (r:1000 w:1000) - /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Uniques Attribute (r:1000 w:1000) - /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) - /// Storage: Uniques ClassAccount (r:0 w:1) - /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) - /// Storage: Uniques ClassMetadataOf (r:0 w:1) - /// Proof: Uniques ClassMetadataOf (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Uniques Account (r:0 w:1000) - /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Uniques CollectionMaxSupply (r:0 w:1) - /// Proof: Uniques CollectionMaxSupply (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1001 w:1000) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1000 w:1000) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Attribute` (r:1000 w:1000) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: 
Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassMetadataOf` (r:0 w:1) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:1000) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::CollectionMaxSupply` (r:0 w:1) + /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. @@ -520,14 +521,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `418 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` // Estimated: `3643 + a * (2839 ±0) + m * (2583 ±0) + n * (2597 ±0)` - // Minimum execution time: 2_498_918_000 picoseconds. - Weight::from_parts(2_516_809_000, 3643) - // Standard Error: 26_297 - .saturating_add(Weight::from_parts(6_648_035, 0).saturating_mul(n.into())) - // Standard Error: 26_297 - .saturating_add(Weight::from_parts(354_268, 0).saturating_mul(m.into())) - // Standard Error: 26_297 - .saturating_add(Weight::from_parts(223_770, 0).saturating_mul(a.into())) + // Minimum execution time: 3_281_673_000 picoseconds. 
+ Weight::from_parts(3_443_387_000, 3643) + // Standard Error: 41_937 + .saturating_add(Weight::from_parts(7_914_842, 0).saturating_mul(n.into())) + // Standard Error: 41_937 + .saturating_add(Weight::from_parts(519_960, 0).saturating_mul(m.into())) + // Standard Error: 41_937 + .saturating_add(Weight::from_parts(462_690, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) @@ -540,322 +541,322 @@ impl WeightInfo for () { .saturating_add(Weight::from_parts(0, 2583).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) } - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques CollectionMaxSupply (r:1 w:0) - /// Proof: Uniques CollectionMaxSupply (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: Uniques Account (r:0 w:1) - /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:0) + /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:1) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn mint() -> Weight 
{ // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 38_157_000 picoseconds. - Weight::from_parts(38_677_000, 3643) + // Minimum execution time: 38_122_000 picoseconds. + Weight::from_parts(38_924_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques Account (r:0 w:1) - /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Uniques ItemPriceOf (r:0 w:1) - /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:1) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 39_069_000 picoseconds. - Weight::from_parts(40_442_000, 3643) + // Minimum execution time: 38_835_000 picoseconds. 
+ Weight::from_parts(39_754_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques Account (r:0 w:2) - /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Uniques ItemPriceOf (r:0 w:1) - /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:2) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 28_085_000 picoseconds. - Weight::from_parts(28_403_000, 3643) + // Minimum execution time: 27_032_000 picoseconds. 
+ Weight::from_parts(27_793_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Asset (r:5000 w:5000) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:5000 w:5000) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `805 + i * (76 ±0)` // Estimated: `3643 + i * (2597 ±0)` - // Minimum execution time: 16_202_000 picoseconds. - Weight::from_parts(16_380_000, 3643) - // Standard Error: 18_639 - .saturating_add(Weight::from_parts(16_047_161, 0).saturating_mul(i.into())) + // Minimum execution time: 14_737_000 picoseconds. 
+ Weight::from_parts(15_070_000, 3643) + // Standard Error: 22_500 + .saturating_add(Weight::from_parts(18_855_468, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 2597).saturating_mul(i.into())) } - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 20_131_000 picoseconds. - Weight::from_parts(20_535_000, 3643) + // Minimum execution time: 18_664_000 picoseconds. 
+ Weight::from_parts(19_455_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn thaw() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 19_895_000 picoseconds. - Weight::from_parts(20_198_000, 3643) + // Minimum execution time: 18_247_000 picoseconds. + Weight::from_parts(18_763_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn freeze_collection() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_312_000 picoseconds. - Weight::from_parts(15_555_000, 3643) + // Minimum execution time: 13_219_000 picoseconds. 
+ Weight::from_parts(13_923_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn thaw_collection() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_145_000 picoseconds. - Weight::from_parts(15_371_000, 3643) + // Minimum execution time: 13_376_000 picoseconds. + Weight::from_parts(13_904_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Uniques OwnershipAcceptance (r:1 w:1) - /// Proof: Uniques OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques ClassAccount (r:0 w:2) - /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:2) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `423` // Estimated: `3643` - // Minimum execution time: 23_800_000 picoseconds. 
- Weight::from_parts(23_991_000, 3643) + // Minimum execution time: 22_353_000 picoseconds. + Weight::from_parts(23_222_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_929_000 picoseconds. - Weight::from_parts(16_219_000, 3643) + // Minimum execution time: 14_072_000 picoseconds. + Weight::from_parts(14_619_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques ClassAccount (r:0 w:1) - /// Proof: Uniques ClassAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_item_status() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 18_617_000 picoseconds. - Weight::from_parts(19_016_000, 3643) + // Minimum execution time: 17_081_000 picoseconds. 
+ Weight::from_parts(17_698_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques InstanceMetadataOf (r:1 w:0) - /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Uniques Attribute (r:1 w:1) - /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Attribute` (r:1 w:1) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: // Measured: `547` // Estimated: `3829` - // Minimum execution time: 41_982_000 picoseconds. - Weight::from_parts(42_329_000, 3829) + // Minimum execution time: 41_501_000 picoseconds. 
+ Weight::from_parts(43_101_000, 3829) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques InstanceMetadataOf (r:1 w:0) - /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Uniques Attribute (r:1 w:1) - /// Proof: Uniques Attribute (max_values: None, max_size: Some(364), added: 2839, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Attribute` (r:1 w:1) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: // Measured: `936` // Estimated: `3829` - // Minimum execution time: 39_921_000 picoseconds. - Weight::from_parts(40_499_000, 3829) + // Minimum execution time: 39_722_000 picoseconds. 
+ Weight::from_parts(40_390_000, 3829) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques InstanceMetadataOf (r:1 w:1) - /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `415` // Estimated: `3643` - // Minimum execution time: 31_774_000 picoseconds. - Weight::from_parts(32_327_000, 3643) + // Minimum execution time: 30_726_000 picoseconds. + Weight::from_parts(31_557_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques InstanceMetadataOf (r:1 w:1) - /// Proof: Uniques InstanceMetadataOf (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `547` // Estimated: `3643` - // Minimum execution time: 32_551_000 picoseconds. 
- Weight::from_parts(32_891_000, 3643) + // Minimum execution time: 31_303_000 picoseconds. + Weight::from_parts(32_389_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:1) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques ClassMetadataOf (r:1 w:1) - /// Proof: Uniques ClassMetadataOf (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 33_490_000 picoseconds. - Weight::from_parts(34_617_000, 3643) + // Minimum execution time: 32_155_000 picoseconds. 
+ Weight::from_parts(32_885_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques ClassMetadataOf (r:1 w:1) - /// Proof: Uniques ClassMetadataOf (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `461` // Estimated: `3643` - // Minimum execution time: 31_691_000 picoseconds. - Weight::from_parts(32_042_000, 3643) + // Minimum execution time: 30_044_000 picoseconds. + Weight::from_parts(31_405_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 20_738_000 picoseconds. 
- Weight::from_parts(21_067_000, 3643) + // Minimum execution time: 18_904_000 picoseconds. + Weight::from_parts(19_687_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `528` // Estimated: `3643` - // Minimum execution time: 20_404_000 picoseconds. - Weight::from_parts(20_999_000, 3643) + // Minimum execution time: 19_144_000 picoseconds. + Weight::from_parts(19_706_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Uniques OwnershipAcceptance (r:1 w:1) - /// Proof: Uniques OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3517` - // Minimum execution time: 17_047_000 picoseconds. - Weight::from_parts(17_307_000, 3517) + // Minimum execution time: 15_339_000 picoseconds. 
+ Weight::from_parts(15_918_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Uniques CollectionMaxSupply (r:1 w:1) - /// Proof: Uniques CollectionMaxSupply (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) + /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:1) + /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 17_829_000 picoseconds. - Weight::from_parts(18_194_000, 3643) + // Minimum execution time: 15_387_000 picoseconds. + Weight::from_parts(15_726_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Uniques Asset (r:1 w:0) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques ItemPriceOf (r:0 w:1) - /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Uniques::Asset` (r:1 w:0) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn set_price() -> Weight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3587` - // Minimum execution time: 17_620_000 picoseconds. 
- Weight::from_parts(17_931_000, 3587) + // Minimum execution time: 15_873_000 picoseconds. + Weight::from_parts(16_860_000, 3587) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Uniques Asset (r:1 w:1) - /// Proof: Uniques Asset (max_values: None, max_size: Some(122), added: 2597, mode: MaxEncodedLen) - /// Storage: Uniques ItemPriceOf (r:1 w:1) - /// Proof: Uniques ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Uniques Class (r:1 w:0) - /// Proof: Uniques Class (max_values: None, max_size: Some(178), added: 2653, mode: MaxEncodedLen) - /// Storage: Uniques Account (r:0 w:2) - /// Proof: Uniques Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:1 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:2) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn buy_item() -> Weight { // Proof Size summary in bytes: // Measured: `607` // Estimated: `3643` - // Minimum execution time: 39_550_000 picoseconds. - Weight::from_parts(40_052_000, 3643) + // Minimum execution time: 37_245_000 picoseconds. 
+ Weight::from_parts(38_383_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index a30feec467804..87281f0abdf19 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -13,22 +13,22 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-root-testing = { version = "1.0.0-dev", path = "../root-testing" } pallet-collective = { version = "4.0.0-dev", path = "../collective" } pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } -sp-core 
= { version = "7.0.0", path = "../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } [features] default = ["std"] @@ -42,11 +42,26 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "pallet-collective/std", + "pallet-root-testing/std", + "pallet-timestamp/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-collective/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-collective/try-runtime", + "pallet-root-testing/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index ecc78ae6b17b6..c2fd3a851c319 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -27,15 +27,14 @@ use frame_support::{ dispatch::{DispatchError, DispatchErrorWithPostInfo, Dispatchable, Pays}, error::BadOrigin, parameter_types, storage, - traits::{ConstU32, ConstU64, Contains, GenesisBuild}, + traits::{ConstU32, ConstU64, Contains}, weights::Weight, }; use pallet_collective::{EnsureProportionAtLeast, Instance1}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, Hash, IdentityLookup}, - TokenError, + BuildStorage, TokenError, }; type BlockNumber = u64; @@ -125,16 +124,12 @@ mod mock_democracy { } } -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: 
frame_system::{Pallet, Call, Config, Storage, Event}, Timestamp: pallet_timestamp::{Call, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, RootTesting: pallet_root_testing::{Pallet, Call, Storage}, @@ -155,14 +150,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); @@ -188,7 +182,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -265,7 +259,7 @@ use pallet_root_testing::Call as RootTestingCall; use pallet_timestamp::Call as TimestampCall; pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)], } diff --git a/frame/utility/src/weights.rs b/frame/utility/src/weights.rs index 0ff261a33f362..1a3ea6c1f7fc8 100644 --- a/frame/utility/src/weights.rs +++ b/frame/utility/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_utility //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! 
HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_utility +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_utility. pub trait WeightInfo { @@ -63,44 +67,44 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_932_000 picoseconds. - Weight::from_parts(24_064_040, 0) - // Standard Error: 2_486 - .saturating_add(Weight::from_parts(4_238_449, 0).saturating_mul(c.into())) + // Minimum execution time: 6_763_000 picoseconds. + Weight::from_parts(16_943_157, 0) + // Standard Error: 1_904 + .saturating_add(Weight::from_parts(4_653_855, 0).saturating_mul(c.into())) } fn as_derivative() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_536_000 picoseconds. - Weight::from_parts(5_963_000, 0) + // Minimum execution time: 5_149_000 picoseconds. + Weight::from_parts(5_268_000, 0) } /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_820_000 picoseconds. - Weight::from_parts(18_969_535, 0) - // Standard Error: 2_228 - .saturating_add(Weight::from_parts(4_448_073, 0).saturating_mul(c.into())) + // Minimum execution time: 6_976_000 picoseconds. 
+ Weight::from_parts(16_448_433, 0) + // Standard Error: 1_834 + .saturating_add(Weight::from_parts(4_796_983, 0).saturating_mul(c.into())) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_811_000 picoseconds. - Weight::from_parts(10_162_000, 0) + // Minimum execution time: 9_102_000 picoseconds. + Weight::from_parts(9_353_000, 0) } /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_829_000 picoseconds. - Weight::from_parts(12_960_288, 0) - // Standard Error: 2_222 - .saturating_add(Weight::from_parts(4_272_019, 0).saturating_mul(c.into())) + // Minimum execution time: 6_840_000 picoseconds. + Weight::from_parts(17_748_474, 0) + // Standard Error: 2_059 + .saturating_add(Weight::from_parts(4_630_079, 0).saturating_mul(c.into())) } } @@ -111,43 +115,43 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_932_000 picoseconds. - Weight::from_parts(24_064_040, 0) - // Standard Error: 2_486 - .saturating_add(Weight::from_parts(4_238_449, 0).saturating_mul(c.into())) + // Minimum execution time: 6_763_000 picoseconds. + Weight::from_parts(16_943_157, 0) + // Standard Error: 1_904 + .saturating_add(Weight::from_parts(4_653_855, 0).saturating_mul(c.into())) } fn as_derivative() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_536_000 picoseconds. - Weight::from_parts(5_963_000, 0) + // Minimum execution time: 5_149_000 picoseconds. + Weight::from_parts(5_268_000, 0) } /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_820_000 picoseconds. 
- Weight::from_parts(18_969_535, 0) - // Standard Error: 2_228 - .saturating_add(Weight::from_parts(4_448_073, 0).saturating_mul(c.into())) + // Minimum execution time: 6_976_000 picoseconds. + Weight::from_parts(16_448_433, 0) + // Standard Error: 1_834 + .saturating_add(Weight::from_parts(4_796_983, 0).saturating_mul(c.into())) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_811_000 picoseconds. - Weight::from_parts(10_162_000, 0) + // Minimum execution time: 9_102_000 picoseconds. + Weight::from_parts(9_353_000, 0) } /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_829_000 picoseconds. - Weight::from_parts(12_960_288, 0) - // Standard Error: 2_222 - .saturating_add(Weight::from_parts(4_272_019, 0).saturating_mul(c.into())) + // Minimum execution time: 6_840_000 picoseconds. 
+ Weight::from_parts(17_748_474, 0) + // Standard Error: 2_059 + .saturating_add(Weight::from_parts(4_630_079, 0).saturating_mul(c.into())) } } diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index 2aa095bd74bff..1bf4491f3495b 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } log = { version = "0.4.17", default-features = false } @@ -21,13 +21,13 @@ scale-info = { version = "2.5.0", default-features = false, features = ["derive" frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } [features] default = ["std"] @@ -40,6 +40,20 @@ std = [ "scale-info/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "sp-core/std", + "sp-io/std" +] 
+runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime" ] -runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index 15be519842992..4af48f5d368db 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -21,7 +21,7 @@ use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; use frame_support::assert_ok; -use frame_system::{Pallet as System, RawOrigin}; +use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System, RawOrigin}; use sp_runtime::traits::{Bounded, CheckedDiv, CheckedMul}; use super::*; @@ -55,7 +55,7 @@ fn add_vesting_schedules( let source_lookup = T::Lookup::unlookup(source.clone()); T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); - System::::set_block_number(T::BlockNumber::zero()); + System::::set_block_number(BlockNumberFor::::zero()); let mut total_locked: BalanceOf = Zero::zero(); for _ in 0..n { @@ -88,7 +88,7 @@ benchmarks! { let expected_balance = add_vesting_schedules::(caller_lookup, s)?; // At block zero, everything is vested. - assert_eq!(System::::block_number(), T::BlockNumber::zero()); + assert_eq!(System::::block_number(), BlockNumberFor::::zero()); assert_eq!( Vesting::::vesting_balance(&caller), Some(expected_balance), @@ -144,7 +144,7 @@ benchmarks! { let expected_balance = add_vesting_schedules::(other_lookup.clone(), s)?; // At block zero, everything is vested. 
- assert_eq!(System::::block_number(), T::BlockNumber::zero()); + assert_eq!(System::::block_number(), BlockNumberFor::::zero()); assert_eq!( Vesting::::vesting_balance(&other), Some(expected_balance), @@ -284,7 +284,7 @@ benchmarks! { let expected_balance = add_vesting_schedules::(caller_lookup, s)?; // Schedules are not vesting at block 0. - assert_eq!(System::::block_number(), T::BlockNumber::zero()); + assert_eq!(System::::block_number(), BlockNumberFor::::zero()); assert_eq!( Vesting::::vesting_balance(&caller), Some(expected_balance), diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 8f98295dd385a..eb829121e9797 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -67,6 +67,7 @@ use frame_support::{ }, weights::Weight, }; +use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; use sp_runtime::{ traits::{ @@ -127,8 +128,8 @@ impl VestingAction { /// Pick the schedules that this action dictates should continue vesting undisturbed. fn pick_schedules( &self, - schedules: Vec, T::BlockNumber>>, - ) -> impl Iterator, T::BlockNumber>> + '_ { + schedules: Vec, BlockNumberFor>>, + ) -> impl Iterator, BlockNumberFor>> + '_ { schedules.into_iter().enumerate().filter_map(move |(index, schedule)| { if self.should_remove(index) { None @@ -162,7 +163,7 @@ pub mod pallet { type Currency: LockableCurrency; /// Convert the block number into a balance. - type BlockNumberToBalance: Convert>; + type BlockNumberToBalance: Convert, BalanceOf>; /// The minimum amount transferred to call `vested_transfer`. #[pallet::constant] @@ -201,7 +202,7 @@ pub mod pallet { _, Blake2_128Concat, T::AccountId, - BoundedVec, T::BlockNumber>, MaxVestingSchedulesGet>, + BoundedVec, BlockNumberFor>, MaxVestingSchedulesGet>, >; /// Storage version of the pallet. 
@@ -216,11 +217,11 @@ pub mod pallet { #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { - pub vesting: Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, BalanceOf)>, + pub vesting: Vec<(T::AccountId, BlockNumberFor, BlockNumberFor, BalanceOf)>, } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { use sp_runtime::traits::Saturating; @@ -342,7 +343,7 @@ pub mod pallet { pub fn vested_transfer( origin: OriginFor, target: AccountIdLookupOf, - schedule: VestingInfo, T::BlockNumber>, + schedule: VestingInfo, BlockNumberFor>, ) -> DispatchResult { let transactor = ensure_signed(origin)?; let transactor = ::unlookup(transactor); @@ -371,7 +372,7 @@ pub mod pallet { origin: OriginFor, source: AccountIdLookupOf, target: AccountIdLookupOf, - schedule: VestingInfo, T::BlockNumber>, + schedule: VestingInfo, BlockNumberFor>, ) -> DispatchResult { ensure_root(origin)?; Self::do_vested_transfer(source, target, schedule) @@ -433,10 +434,10 @@ impl Pallet { // Create a new `VestingInfo`, based off of two other `VestingInfo`s. // NOTE: We assume both schedules have had funds unlocked up through the current block. fn merge_vesting_info( - now: T::BlockNumber, - schedule1: VestingInfo, T::BlockNumber>, - schedule2: VestingInfo, T::BlockNumber>, - ) -> Option, T::BlockNumber>> { + now: BlockNumberFor, + schedule1: VestingInfo, BlockNumberFor>, + schedule2: VestingInfo, BlockNumberFor>, + ) -> Option, BlockNumberFor>> { let schedule1_ending_block = schedule1.ending_block_as_balance::(); let schedule2_ending_block = schedule2.ending_block_as_balance::(); let now_as_balance = T::BlockNumberToBalance::convert(now); @@ -483,7 +484,7 @@ impl Pallet { fn do_vested_transfer( source: AccountIdLookupOf, target: AccountIdLookupOf, - schedule: VestingInfo, T::BlockNumber>, + schedule: VestingInfo, BlockNumberFor>, ) -> DispatchResult { // Validate user inputs. 
ensure!(schedule.locked() >= T::MinVestedTransfer::get(), Error::::AmountLow); @@ -531,9 +532,9 @@ impl Pallet { /// /// NOTE: the amount locked does not include any schedules that are filtered out via `action`. fn report_schedule_updates( - schedules: Vec, T::BlockNumber>>, + schedules: Vec, BlockNumberFor>>, action: VestingAction, - ) -> (Vec, T::BlockNumber>>, BalanceOf) { + ) -> (Vec, BlockNumberFor>>, BalanceOf) { let now = >::block_number(); let mut total_locked_now: BalanceOf = Zero::zero(); @@ -570,10 +571,10 @@ impl Pallet { /// Write an accounts updated vesting schedules to storage. fn write_vesting( who: &T::AccountId, - schedules: Vec, T::BlockNumber>>, + schedules: Vec, BlockNumberFor>>, ) -> Result<(), DispatchError> { let schedules: BoundedVec< - VestingInfo, T::BlockNumber>, + VestingInfo, BlockNumberFor>, MaxVestingSchedulesGet, > = schedules.try_into().map_err(|_| Error::::AtMaxVestingSchedules)?; @@ -602,9 +603,9 @@ impl Pallet { /// Execute a `VestingAction` against the given `schedules`. Returns the updated schedules /// and locked amount. fn exec_action( - schedules: Vec, T::BlockNumber>>, + schedules: Vec, BlockNumberFor>>, action: VestingAction, - ) -> Result<(Vec, T::BlockNumber>>, BalanceOf), DispatchError> { + ) -> Result<(Vec, BlockNumberFor>>, BalanceOf), DispatchError> { let (schedules, locked_now) = match action { VestingAction::Merge { index1: idx1, index2: idx2 } => { // The schedule index is based off of the schedule ordering prior to filtering out @@ -649,7 +650,7 @@ where BalanceOf: MaybeSerializeDeserialize + Debug, { type Currency = T::Currency; - type Moment = T::BlockNumber; + type Moment = BlockNumberFor; /// Get the amount that is currently being vested and cannot be transferred out of this account. 
fn vesting_balance(who: &T::AccountId) -> Option> { @@ -680,7 +681,7 @@ where who: &T::AccountId, locked: BalanceOf, per_block: BalanceOf, - starting_block: T::BlockNumber, + starting_block: BlockNumberFor, ) -> DispatchResult { if locked.is_zero() { return Ok(()) @@ -713,7 +714,7 @@ where who: &T::AccountId, locked: BalanceOf, per_block: BalanceOf, - starting_block: T::BlockNumber, + starting_block: BlockNumberFor, ) -> DispatchResult { // Check for `per_block` or `locked` of 0. if !VestingInfo::new(locked, per_block, starting_block).is_valid() { diff --git a/frame/vesting/src/migrations.rs b/frame/vesting/src/migrations.rs index 69bbc97296500..cac3c90b403ab 100644 --- a/frame/vesting/src/migrations.rs +++ b/frame/vesting/src/migrations.rs @@ -40,12 +40,12 @@ pub mod v1 { pub fn migrate() -> Weight { let mut reads_writes = 0; - Vesting::::translate::, T::BlockNumber>, _>( + Vesting::::translate::, BlockNumberFor>, _>( |_key, vesting_info| { reads_writes += 1; let v: Option< BoundedVec< - VestingInfo, T::BlockNumber>, + VestingInfo, BlockNumberFor>, MaxVestingSchedulesGet, >, > = vec![vesting_info].try_into().ok(); diff --git a/frame/vesting/src/mock.rs b/frame/vesting/src/mock.rs index 1adb36b730b1a..fe1779475a69a 100644 --- a/frame/vesting/src/mock.rs +++ b/frame/vesting/src/mock.rs @@ -17,27 +17,23 @@ use frame_support::{ parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild, WithdrawReasons}, + traits::{ConstU32, ConstU64, WithdrawReasons}, }; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, Identity, IdentityLookup}, + BuildStorage, }; use super::*; use crate as pallet_vesting; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: 
frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Vesting: pallet_vesting::{Pallet, Call, Storage, Event, Config}, } @@ -49,15 +45,14 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockHashCount = ConstU64<250>; type BlockLength = (); - type BlockNumber = u64; type BlockWeights = (); type RuntimeCall = RuntimeCall; type DbWeight = (); type RuntimeEvent = RuntimeEvent; type Hash = H256; type Hashing = BlakeTwo256; - type Header = Header; - type Index = u64; + type Block = Block; + type Nonce = u64; type Lookup = IdentityLookup; type OnKilledAccount = (); type OnNewAccount = (); @@ -82,7 +77,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } parameter_types! { @@ -125,7 +120,7 @@ impl ExtBuilder { pub fn build(self) -> sp_io::TestExternalities { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![ (1, 10 * self.existential_deposit), diff --git a/frame/vesting/src/weights.rs b/frame/vesting/src/weights.rs index 4cf3b3dcfbfd0..17bde88857779 100644 --- a/frame/vesting/src/weights.rs +++ b/frame/vesting/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_vesting //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! 
HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_vesting +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_vesting. pub trait WeightInfo { @@ -73,12 +77,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_182_000 picoseconds. - Weight::from_parts(35_159_830, 4764) - // Standard Error: 952 - .saturating_add(Weight::from_parts(63_309, 0).saturating_mul(l.into())) - // Standard Error: 1_694 - .saturating_add(Weight::from_parts(62_244, 0).saturating_mul(s.into())) + // Minimum execution time: 35_336_000 picoseconds. + Weight::from_parts(34_290_169, 4764) + // Standard Error: 1_381 + .saturating_add(Weight::from_parts(76_354, 0).saturating_mul(l.into())) + // Standard Error: 2_457 + .saturating_add(Weight::from_parts(81_362, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -94,12 +98,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 39_344_000 picoseconds. 
- Weight::from_parts(38_921_936, 4764) - // Standard Error: 1_283 - .saturating_add(Weight::from_parts(61_531, 0).saturating_mul(l.into())) - // Standard Error: 2_283 - .saturating_add(Weight::from_parts(36_175, 0).saturating_mul(s.into())) + // Minimum execution time: 38_540_000 picoseconds. + Weight::from_parts(38_893_820, 4764) + // Standard Error: 1_710 + .saturating_add(Weight::from_parts(62_106, 0).saturating_mul(l.into())) + // Standard Error: 3_043 + .saturating_add(Weight::from_parts(41_966, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -117,12 +121,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 39_461_000 picoseconds. - Weight::from_parts(38_206_465, 4764) - // Standard Error: 743 - .saturating_add(Weight::from_parts(56_973, 0).saturating_mul(l.into())) - // Standard Error: 1_322 - .saturating_add(Weight::from_parts(65_059, 0).saturating_mul(s.into())) + // Minimum execution time: 37_529_000 picoseconds. + Weight::from_parts(36_781_151, 4764) + // Standard Error: 1_490 + .saturating_add(Weight::from_parts(76_322, 0).saturating_mul(l.into())) + // Standard Error: 2_652 + .saturating_add(Weight::from_parts(76_914, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -140,12 +144,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 42_029_000 picoseconds. - Weight::from_parts(42_153_438, 4764) - // Standard Error: 1_108 - .saturating_add(Weight::from_parts(50_058, 0).saturating_mul(l.into())) - // Standard Error: 1_971 - .saturating_add(Weight::from_parts(32_391, 0).saturating_mul(s.into())) + // Minimum execution time: 41_217_000 picoseconds. 
+ Weight::from_parts(40_942_515, 4764) + // Standard Error: 2_098 + .saturating_add(Weight::from_parts(65_213, 0).saturating_mul(l.into())) + // Standard Error: 3_733 + .saturating_add(Weight::from_parts(63_326, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -163,12 +167,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `555 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 75_223_000 picoseconds. - Weight::from_parts(76_675_778, 4764) - // Standard Error: 2_534 - .saturating_add(Weight::from_parts(70_731, 0).saturating_mul(l.into())) - // Standard Error: 4_509 - .saturating_add(Weight::from_parts(108_866, 0).saturating_mul(s.into())) + // Minimum execution time: 76_396_000 picoseconds. + Weight::from_parts(77_085_336, 4764) + // Standard Error: 2_795 + .saturating_add(Weight::from_parts(88_995, 0).saturating_mul(l.into())) + // Standard Error: 4_974 + .saturating_add(Weight::from_parts(135_384, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -186,12 +190,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `658 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 76_922_000 picoseconds. - Weight::from_parts(78_634_098, 6196) - // Standard Error: 2_099 - .saturating_add(Weight::from_parts(68_218, 0).saturating_mul(l.into())) - // Standard Error: 3_736 - .saturating_add(Weight::from_parts(95_990, 0).saturating_mul(s.into())) + // Minimum execution time: 77_312_000 picoseconds. 
+ Weight::from_parts(79_600_900, 6196) + // Standard Error: 3_232 + .saturating_add(Weight::from_parts(78_018, 0).saturating_mul(l.into())) + // Standard Error: 5_750 + .saturating_add(Weight::from_parts(100_848, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -209,12 +213,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 39_476_000 picoseconds. - Weight::from_parts(38_261_747, 4764) - // Standard Error: 1_794 - .saturating_add(Weight::from_parts(69_639, 0).saturating_mul(l.into())) - // Standard Error: 3_313 - .saturating_add(Weight::from_parts(73_202, 0).saturating_mul(s.into())) + // Minimum execution time: 38_769_000 picoseconds. + Weight::from_parts(37_752_437, 4764) + // Standard Error: 1_415 + .saturating_add(Weight::from_parts(78_398, 0).saturating_mul(l.into())) + // Standard Error: 2_614 + .saturating_add(Weight::from_parts(78_922, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -232,12 +236,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 43_764_000 picoseconds. - Weight::from_parts(42_679_386, 4764) - // Standard Error: 1_224 - .saturating_add(Weight::from_parts(65_857, 0).saturating_mul(l.into())) - // Standard Error: 2_261 - .saturating_add(Weight::from_parts(70_861, 0).saturating_mul(s.into())) + // Minimum execution time: 43_021_000 picoseconds. 
+ Weight::from_parts(42_182_858, 4764) + // Standard Error: 1_747 + .saturating_add(Weight::from_parts(83_938, 0).saturating_mul(l.into())) + // Standard Error: 3_227 + .saturating_add(Weight::from_parts(84_652, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -257,12 +261,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_182_000 picoseconds. - Weight::from_parts(35_159_830, 4764) - // Standard Error: 952 - .saturating_add(Weight::from_parts(63_309, 0).saturating_mul(l.into())) - // Standard Error: 1_694 - .saturating_add(Weight::from_parts(62_244, 0).saturating_mul(s.into())) + // Minimum execution time: 35_336_000 picoseconds. + Weight::from_parts(34_290_169, 4764) + // Standard Error: 1_381 + .saturating_add(Weight::from_parts(76_354, 0).saturating_mul(l.into())) + // Standard Error: 2_457 + .saturating_add(Weight::from_parts(81_362, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -278,12 +282,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 39_344_000 picoseconds. - Weight::from_parts(38_921_936, 4764) - // Standard Error: 1_283 - .saturating_add(Weight::from_parts(61_531, 0).saturating_mul(l.into())) - // Standard Error: 2_283 - .saturating_add(Weight::from_parts(36_175, 0).saturating_mul(s.into())) + // Minimum execution time: 38_540_000 picoseconds. 
+ Weight::from_parts(38_893_820, 4764) + // Standard Error: 1_710 + .saturating_add(Weight::from_parts(62_106, 0).saturating_mul(l.into())) + // Standard Error: 3_043 + .saturating_add(Weight::from_parts(41_966, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -301,12 +305,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 39_461_000 picoseconds. - Weight::from_parts(38_206_465, 4764) - // Standard Error: 743 - .saturating_add(Weight::from_parts(56_973, 0).saturating_mul(l.into())) - // Standard Error: 1_322 - .saturating_add(Weight::from_parts(65_059, 0).saturating_mul(s.into())) + // Minimum execution time: 37_529_000 picoseconds. + Weight::from_parts(36_781_151, 4764) + // Standard Error: 1_490 + .saturating_add(Weight::from_parts(76_322, 0).saturating_mul(l.into())) + // Standard Error: 2_652 + .saturating_add(Weight::from_parts(76_914, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -324,12 +328,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 42_029_000 picoseconds. - Weight::from_parts(42_153_438, 4764) - // Standard Error: 1_108 - .saturating_add(Weight::from_parts(50_058, 0).saturating_mul(l.into())) - // Standard Error: 1_971 - .saturating_add(Weight::from_parts(32_391, 0).saturating_mul(s.into())) + // Minimum execution time: 41_217_000 picoseconds. 
+ Weight::from_parts(40_942_515, 4764) + // Standard Error: 2_098 + .saturating_add(Weight::from_parts(65_213, 0).saturating_mul(l.into())) + // Standard Error: 3_733 + .saturating_add(Weight::from_parts(63_326, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -347,12 +351,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `555 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 75_223_000 picoseconds. - Weight::from_parts(76_675_778, 4764) - // Standard Error: 2_534 - .saturating_add(Weight::from_parts(70_731, 0).saturating_mul(l.into())) - // Standard Error: 4_509 - .saturating_add(Weight::from_parts(108_866, 0).saturating_mul(s.into())) + // Minimum execution time: 76_396_000 picoseconds. + Weight::from_parts(77_085_336, 4764) + // Standard Error: 2_795 + .saturating_add(Weight::from_parts(88_995, 0).saturating_mul(l.into())) + // Standard Error: 4_974 + .saturating_add(Weight::from_parts(135_384, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -370,12 +374,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `658 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 76_922_000 picoseconds. - Weight::from_parts(78_634_098, 6196) - // Standard Error: 2_099 - .saturating_add(Weight::from_parts(68_218, 0).saturating_mul(l.into())) - // Standard Error: 3_736 - .saturating_add(Weight::from_parts(95_990, 0).saturating_mul(s.into())) + // Minimum execution time: 77_312_000 picoseconds. 
+ Weight::from_parts(79_600_900, 6196) + // Standard Error: 3_232 + .saturating_add(Weight::from_parts(78_018, 0).saturating_mul(l.into())) + // Standard Error: 5_750 + .saturating_add(Weight::from_parts(100_848, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -393,12 +397,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 39_476_000 picoseconds. - Weight::from_parts(38_261_747, 4764) - // Standard Error: 1_794 - .saturating_add(Weight::from_parts(69_639, 0).saturating_mul(l.into())) - // Standard Error: 3_313 - .saturating_add(Weight::from_parts(73_202, 0).saturating_mul(s.into())) + // Minimum execution time: 38_769_000 picoseconds. + Weight::from_parts(37_752_437, 4764) + // Standard Error: 1_415 + .saturating_add(Weight::from_parts(78_398, 0).saturating_mul(l.into())) + // Standard Error: 2_614 + .saturating_add(Weight::from_parts(78_922, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -416,12 +420,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 43_764_000 picoseconds. - Weight::from_parts(42_679_386, 4764) - // Standard Error: 1_224 - .saturating_add(Weight::from_parts(65_857, 0).saturating_mul(l.into())) - // Standard Error: 2_261 - .saturating_add(Weight::from_parts(70_861, 0).saturating_mul(s.into())) + // Minimum execution time: 43_021_000 picoseconds. 
+ Weight::from_parts(42_182_858, 4764) + // Standard Error: 1_747 + .saturating_add(Weight::from_parts(83_938, 0).saturating_mul(l.into())) + // Standard Error: 3_227 + .saturating_add(Weight::from_parts(84_652, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/frame/whitelist/Cargo.toml b/frame/whitelist/Cargo.toml index 21ecc4be374d3..ba05859df2913 100644 --- a/frame/whitelist/Cargo.toml +++ b/frame/whitelist/Cargo.toml @@ -12,20 +12,20 @@ description = "FRAME pallet for whitelisting call, and dispatch from specific or targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-preimage = { version = "4.0.0-dev", path = "../preimage" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } 
-sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-io = { version = "23.0.0", path = "../../primitives/io" } [features] default = ["std"] @@ -38,10 +38,24 @@ std = [ "sp-api/std", "sp-runtime/std", "sp-std/std", + "pallet-balances/std", + "pallet-preimage/std", + "sp-core/std", + "sp-io/std" ] runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-preimage/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-preimage/try-runtime", + "sp-runtime/try-runtime" ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/whitelist/src/mock.rs b/frame/whitelist/src/mock.rs index d644cd661ec9f..d91f43b33af91 100644 --- a/frame/whitelist/src/mock.rs +++ b/frame/whitelist/src/mock.rs @@ -28,19 +28,14 @@ use frame_support::{ use frame_system::EnsureRoot; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, Balances: pallet_balances, @@ -55,14 +50,13 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type 
Version = (); @@ -88,7 +82,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } @@ -111,7 +105,7 @@ impl pallet_whitelist::Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = GenesisConfig::default().build_storage().unwrap(); + let t = RuntimeGenesisConfig::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext diff --git a/frame/whitelist/src/weights.rs b/frame/whitelist/src/weights.rs index 8636ea376e246..de42c5a5841cf 100644 --- a/frame/whitelist/src/weights.rs +++ b/frame/whitelist/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_whitelist //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -31,6 +31,9 @@ // --steps=50 // --repeat=20 // --pallet=pallet_whitelist +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --execution=wasm // --wasm-execution=compiled @@ -42,9 +45,10 @@ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for pallet_whitelist. 
pub trait WeightInfo { @@ -65,8 +69,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `217` // Estimated: `3556` - // Minimum execution time: 21_370_000 picoseconds. - Weight::from_parts(21_834_000, 3556) + // Minimum execution time: 19_914_000 picoseconds. + Weight::from_parts(20_892_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -78,8 +82,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `346` // Estimated: `3556` - // Minimum execution time: 19_222_000 picoseconds. - Weight::from_parts(19_582_000, 3556) + // Minimum execution time: 18_142_000 picoseconds. + Weight::from_parts(18_529_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -94,10 +98,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `422 + n * (1 ±0)` // Estimated: `3886 + n * (1 ±0)` - // Minimum execution time: 31_417_000 picoseconds. - Weight::from_parts(31_620_000, 3886) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_145, 0).saturating_mul(n.into())) + // Minimum execution time: 30_671_000 picoseconds. + Weight::from_parts(31_197_000, 3886) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_163, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -111,10 +115,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `346` // Estimated: `3556` - // Minimum execution time: 23_092_000 picoseconds. - Weight::from_parts(24_043_432, 3556) + // Minimum execution time: 22_099_000 picoseconds. 
+ Weight::from_parts(23_145_477, 3556) // Standard Error: 5 - .saturating_add(Weight::from_parts(1_227, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_422, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -130,8 +134,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `217` // Estimated: `3556` - // Minimum execution time: 21_370_000 picoseconds. - Weight::from_parts(21_834_000, 3556) + // Minimum execution time: 19_914_000 picoseconds. + Weight::from_parts(20_892_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -143,8 +147,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `346` // Estimated: `3556` - // Minimum execution time: 19_222_000 picoseconds. - Weight::from_parts(19_582_000, 3556) + // Minimum execution time: 18_142_000 picoseconds. + Weight::from_parts(18_529_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -159,10 +163,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `422 + n * (1 ±0)` // Estimated: `3886 + n * (1 ±0)` - // Minimum execution time: 31_417_000 picoseconds. - Weight::from_parts(31_620_000, 3886) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_145, 0).saturating_mul(n.into())) + // Minimum execution time: 30_671_000 picoseconds. + Weight::from_parts(31_197_000, 3886) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_163, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -176,10 +180,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `346` // Estimated: `3556` - // Minimum execution time: 23_092_000 picoseconds. 
- Weight::from_parts(24_043_432, 3556) + // Minimum execution time: 22_099_000 picoseconds. + Weight::from_parts(23_145_477, 3556) // Standard Error: 5 - .saturating_add(Weight::from_parts(1_227, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_422, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index 1d140854f6cce..2f0fe5d5d93cb 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -13,18 +13,19 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } sp-api-proc-macro = { version = "4.0.0-dev", path = "proc-macro" } -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } -sp-version = { version = "5.0.0", default-features = false, path = "../version" } -sp-state-machine = { version = "0.13.0", default-features = false, optional = true, path = "../state-machine" } -sp-trie = { version = "7.0.0", default-features = false, optional = true, path = "../trie" } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../runtime" } +sp-externalities = { version = "0.19.0", default-features = false, optional = true, path = "../externalities" } +sp-version = { version = "22.0.0", default-features = false, path = "../version" } +sp-state-machine = { version = "0.28.0", default-features = false, optional = true, path = "../state-machine" } +sp-trie = 
{ version = "22.0.0", default-features = false, optional = true, path = "../trie" } hash-db = { version = "0.16.0", optional = true } thiserror = { version = "1.0.30", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -sp-metadata-ir = { version = "0.1.0", default-features = false, path = "../metadata-ir" } +sp-metadata-ir = { version = "0.1.0", default-features = false, optional = true, path = "../metadata-ir" } log = { version = "0.4.17", default-features = false } [dev-dependencies] @@ -35,6 +36,7 @@ default = ["std"] std = [ "codec/std", "sp-core/std", + "sp-externalities", "sp-std/std", "sp-runtime/std", "sp-state-machine/std", @@ -44,9 +46,12 @@ std = [ "thiserror", "log/std", "scale-info/std", - "sp-metadata-ir/std", + "sp-metadata-ir?/std", + "sp-api-proc-macro/std", + "sp-externalities?/std", + "sp-test-primitives/std" ] -# Special feature to disable logging completly. +# Special feature to disable logging completely. # # By default `sp-api` initializes the `RuntimeLogger` for each runtime api function. However, # logging functionality increases the code size. It is recommended to enable this feature when @@ -56,3 +61,7 @@ std = [ disable-logging = ["log/max_level_off"] # Do not report the documentation in the metadata. 
no-metadata-docs = ["sp-api-proc-macro/no-metadata-docs"] +frame-metadata = [ + "sp-metadata-ir", + "sp-api-proc-macro/frame-metadata" +] diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index 9d721950f90be..fb6513806c812 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -16,12 +16,12 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = "1.0.26" -syn = { version = "2.0.14", features = ["full", "fold", "extra-traits", "visit"] } +quote = "1.0.28" +syn = { version = "2.0.16", features = ["full", "fold", "extra-traits", "visit"] } proc-macro2 = "1.0.56" blake2 = { version = "0.10.4", default-features = false } proc-macro-crate = "1.1.3" -expander = "1.0.0" +expander = "2.0.0" Inflector = "0.11.4" [dev-dependencies] @@ -32,3 +32,4 @@ assert_matches = "1.3.0" default = ["std"] std = [] no-metadata-docs = [] +frame-metadata = [] diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index cde33c19016bd..370735819f94c 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -20,7 +20,6 @@ use crate::{ API_VERSION_ATTRIBUTE, BLOCK_GENERIC_IDENT, CHANGED_IN_ATTRIBUTE, CORE_TRAIT_ATTRIBUTE, RENAMED_ATTRIBUTE, SUPPORTED_ATTRIBUTE_NAMES, }, - runtime_metadata::generate_decl_runtime_metadata, utils::{ extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, generate_crate_access, generate_runtime_mod_name_for_trait, parse_runtime_api_version, @@ -88,32 +87,6 @@ fn remove_supported_attributes(attrs: &mut Vec) -> HashMap<&'static s result } -/// Visits the ast and checks if `Block` ident is used somewhere. 
-struct IsUsingBlock { - result: bool, -} - -impl<'ast> Visit<'ast> for IsUsingBlock { - fn visit_ident(&mut self, i: &'ast Ident) { - if i == BLOCK_GENERIC_IDENT { - self.result = true; - } - } -} - -/// Replace all occurrences of `Block` with `NodeBlock` -struct ReplaceBlockWithNodeBlock {} - -impl Fold for ReplaceBlockWithNodeBlock { - fn fold_ident(&mut self, input: Ident) -> Ident { - if input == BLOCK_GENERIC_IDENT { - Ident::new("NodeBlock", Span::call_site()) - } else { - input - } - } -} - /// Versioned API traits are used to catch missing methods when implementing a specific version of a /// versioned API. They contain all non-versioned methods (aka stable methods) from the main trait /// and all versioned methods for the specific version. This means that there is one trait for each @@ -214,13 +187,17 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { let mut decl = decl.clone(); let decl_span = decl.span(); extend_generics_with_block(&mut decl.generics); - let metadata = generate_decl_runtime_metadata(&decl); let mod_name = generate_runtime_mod_name_for_trait(&decl.ident); let found_attributes = remove_supported_attributes(&mut decl.attrs); let api_version = get_api_version(&found_attributes).map(|v| generate_runtime_api_version(v as u32))?; let id = generate_runtime_api_id(&decl.ident.to_string()); + #[cfg(feature = "frame-metadata")] + let metadata = crate::runtime_metadata::generate_decl_runtime_metadata(&decl); + #[cfg(not(feature = "frame-metadata"))] + let metadata = quote!(); + let trait_api_version = get_api_version(&found_attributes)?; let mut methods_by_version: BTreeMap> = BTreeMap::new(); @@ -338,7 +315,6 @@ impl<'a> ToClientSideDecl<'a> { fn __runtime_api_internal_call_api_at( &self, at: #block_hash, - context: #crate_::ExecutionContext, params: std::vec::Vec, fn_name: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, ) -> std::result::Result, #crate_::ApiError>; @@ -358,9 +334,8 @@ impl<'a> ToClientSideDecl<'a> { 
items.into_iter().for_each(|i| match i { TraitItem::Fn(method) => { - let (fn_decl, fn_decl_ctx) = self.fold_trait_item_fn(method, trait_generics_num); + let fn_decl = self.create_method_decl(method, trait_generics_num); result.push(fn_decl.into()); - result.push(fn_decl_ctx.into()); }, r => result.push(r), }); @@ -368,41 +343,12 @@ impl<'a> ToClientSideDecl<'a> { result } - fn fold_trait_item_fn( - &mut self, - method: TraitItemFn, - trait_generics_num: usize, - ) -> (TraitItemFn, TraitItemFn) { - let crate_ = self.crate_; - let context = quote!( #crate_::ExecutionContext::OffchainCall(None) ); - let fn_decl = self.create_method_decl(method.clone(), context, trait_generics_num); - let fn_decl_ctx = self.create_method_decl_with_context(method, trait_generics_num); - - (fn_decl, fn_decl_ctx) - } - - fn create_method_decl_with_context( - &mut self, - method: TraitItemFn, - trait_generics_num: usize, - ) -> TraitItemFn { - let crate_ = self.crate_; - let context_arg: syn::FnArg = parse_quote!( context: #crate_::ExecutionContext ); - let mut fn_decl_ctx = self.create_method_decl(method, quote!(context), trait_generics_num); - fn_decl_ctx.sig.ident = - Ident::new(&format!("{}_with_context", &fn_decl_ctx.sig.ident), Span::call_site()); - fn_decl_ctx.sig.inputs.insert(2, context_arg); - - fn_decl_ctx - } - /// Takes the method declared by the user and creates the declaration we require for the runtime /// api client side. This method will call by default the `method_runtime_api_impl` for doing /// the actual call into the runtime. 
fn create_method_decl( &mut self, mut method: TraitItemFn, - context: TokenStream, trait_generics_num: usize, ) -> TraitItemFn { let params = match extract_parameter_names_types_and_borrows( @@ -490,7 +436,6 @@ impl<'a> ToClientSideDecl<'a> { >::__runtime_api_internal_call_api_at( self, __runtime_api_at_param__, - #context, __runtime_api_impl_params_encoded__, &|_version| { #( @@ -539,8 +484,6 @@ impl<'a> Fold for ToClientSideDecl<'a> { input.supertraits.push(parse_quote!( #crate_::Core<#block_ident> )); } - // The client side trait is only required when compiling with the feature `std` or `test`. - input.attrs.push(parse_quote!( #[cfg(any(feature = "std", test))] )); input.items = self.fold_item_trait_items(input.items, input.generics.params.len()); fold::fold_item_trait(self, input) @@ -584,12 +527,13 @@ fn generate_runtime_info_impl(trait_: &ItemTrait, version: u64) -> TokenStream { }); quote!( - #[cfg(any(feature = "std", test))] - impl < #( #impl_generics, )* > #crate_::RuntimeApiInfo - for dyn #trait_name < #( #ty_generics, )* > - { - #id - #version + #crate_::std_enabled! { + impl < #( #impl_generics, )* > #crate_::RuntimeApiInfo + for dyn #trait_name < #( #ty_generics, )* > + { + #id + #version + } } ) } @@ -636,7 +580,11 @@ fn generate_client_side_decls(decls: &[ItemTrait]) -> Result { let runtime_info = api_version.map(|v| generate_runtime_info_impl(&decl, v))?; - result.push(quote!( #decl #runtime_info #( #errors )* )); + result.push(quote!( + #crate_::std_enabled! 
{ #decl } + #runtime_info + #( #errors )* + )); } Ok(quote!( #( #result )* )) diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index b8dcf625df45e..66bc5b0e9e5e3 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -17,7 +17,6 @@ use crate::{ common::API_VERSION_ATTRIBUTE, - runtime_metadata::generate_impl_runtime_metadata, utils::{ extract_all_signature_types, extract_block_type_from_trait_path, extract_impl_trait, extract_parameter_names_types_and_borrows, generate_crate_access, @@ -172,11 +171,12 @@ fn generate_dispatch_function(impls: &[ItemImpl]) -> Result { }); Ok(quote!( - #[cfg(feature = "std")] - pub fn dispatch(method: &str, mut #data: &[u8]) -> Option> { - match method { - #( #impl_calls )* - _ => None, + #c::std_enabled! { + pub fn dispatch(method: &str, mut #data: &[u8]) -> Option> { + match method { + #( #impl_calls )* + _ => None, + } } } )) @@ -195,22 +195,23 @@ fn generate_wasm_interface(impls: &[ItemImpl]) -> Result { Ident::new(&prefix_function_with_trait(&trait_, &fn_name), Span::call_site()); quote!( - #( #attrs )* - #[cfg(not(feature = "std"))] - #[no_mangle] - pub unsafe fn #fn_name(input_data: *mut u8, input_len: usize) -> u64 { - let mut #input = if input_len == 0 { - &[0u8; 0] - } else { - unsafe { - #c::slice::from_raw_parts(input_data, input_len) - } - }; - - #c::init_runtime_logger(); - - let output = (move || { #impl_ })(); - #c::to_substrate_wasm_fn_return_value(&output) + #c::std_disabled! 
{ + #( #attrs )* + #[no_mangle] + pub unsafe fn #fn_name(input_data: *mut u8, input_len: usize) -> u64 { + let mut #input = if input_len == 0 { + &[0u8; 0] + } else { + unsafe { + #c::slice::from_raw_parts(input_data, input_len) + } + }; + + #c::init_runtime_logger(); + + let output = (move || { #impl_ })(); + #c::to_substrate_wasm_fn_return_value(&output) + } } ) }); @@ -223,132 +224,140 @@ fn generate_runtime_api_base_structures() -> Result { Ok(quote!( pub struct RuntimeApi {} - /// Implements all runtime apis for the client side. - #[cfg(any(feature = "std", test))] - pub struct RuntimeApiImpl + 'static> { - call: &'static C, - commit_on_success: std::cell::RefCell, - changes: std::cell::RefCell<#crate_::OverlayedChanges>, - storage_transaction_cache: std::cell::RefCell< - #crate_::StorageTransactionCache - >, - recorder: std::option::Option<#crate_::ProofRecorder>, - } - - #[cfg(any(feature = "std", test))] - impl> #crate_::ApiExt for - RuntimeApiImpl - { - type StateBackend = C::StateBackend; - - fn execute_in_transaction #crate_::TransactionOutcome, R>( - &self, - call: F, - ) -> R where Self: Sized { - self.start_transaction(); - - *std::cell::RefCell::borrow_mut(&self.commit_on_success) = false; - let res = call(self); - *std::cell::RefCell::borrow_mut(&self.commit_on_success) = true; - - self.commit_or_rollback(std::matches!(res, #crate_::TransactionOutcome::Commit(_))); - - res.into_inner() + #crate_::std_enabled! { + /// Implements all runtime apis for the client side. 
+ pub struct RuntimeApiImpl + 'static> { + call: &'static C, + transaction_depth: std::cell::RefCell, + changes: std::cell::RefCell<#crate_::OverlayedChanges<#crate_::HashingFor>>, + recorder: std::option::Option<#crate_::ProofRecorder>, + call_context: #crate_::CallContext, + extensions: std::cell::RefCell<#crate_::Extensions>, + extensions_generated_for: std::cell::RefCell>, } - fn has_api( - &self, - at: ::Hash, - ) -> std::result::Result where Self: Sized { - #crate_::CallApiAt::::runtime_version_at(self.call, at) + impl> #crate_::ApiExt for + RuntimeApiImpl + { + fn execute_in_transaction #crate_::TransactionOutcome, R>( + &self, + call: F, + ) -> R where Self: Sized { + self.start_transaction(); + + *std::cell::RefCell::borrow_mut(&self.transaction_depth) += 1; + let res = call(self); + std::cell::RefCell::borrow_mut(&self.transaction_depth) + .checked_sub(1) + .expect("Transactions are opened and closed together; qed"); + + self.commit_or_rollback_transaction( + std::matches!(res, #crate_::TransactionOutcome::Commit(_)) + ); + + res.into_inner() + } + + fn has_api( + &self, + at: ::Hash, + ) -> std::result::Result where Self: Sized { + #crate_::CallApiAt::::runtime_version_at(self.call, at) .map(|v| #crate_::RuntimeVersion::has_api_with(&v, &A::ID, |v| v == A::VERSION)) - } + } - fn has_api_with bool>( - &self, - at: ::Hash, - pred: P, - ) -> std::result::Result where Self: Sized { - #crate_::CallApiAt::::runtime_version_at(self.call, at) + fn has_api_with bool>( + &self, + at: ::Hash, + pred: P, + ) -> std::result::Result where Self: Sized { + #crate_::CallApiAt::::runtime_version_at(self.call, at) .map(|v| #crate_::RuntimeVersion::has_api_with(&v, &A::ID, pred)) - } + } - fn api_version( - &self, - at: ::Hash, - ) -> std::result::Result, #crate_::ApiError> where Self: Sized { - #crate_::CallApiAt::::runtime_version_at(self.call, at) + fn api_version( + &self, + at: ::Hash, + ) -> std::result::Result, #crate_::ApiError> where Self: Sized { + 
#crate_::CallApiAt::::runtime_version_at(self.call, at) .map(|v| #crate_::RuntimeVersion::api_version(&v, &A::ID)) - } + } - fn record_proof(&mut self) { - self.recorder = std::option::Option::Some(std::default::Default::default()); - } + fn record_proof(&mut self) { + self.recorder = std::option::Option::Some(std::default::Default::default()); + } - fn proof_recorder(&self) -> std::option::Option<#crate_::ProofRecorder> { - std::clone::Clone::clone(&self.recorder) - } + fn proof_recorder(&self) -> std::option::Option<#crate_::ProofRecorder> { + std::clone::Clone::clone(&self.recorder) + } - fn extract_proof( - &mut self, - ) -> std::option::Option<#crate_::StorageProof> { - let recorder = std::option::Option::take(&mut self.recorder); - std::option::Option::map(recorder, |recorder| { - #crate_::ProofRecorder::::drain_storage_proof(recorder) - }) - } + fn extract_proof( + &mut self, + ) -> std::option::Option<#crate_::StorageProof> { + let recorder = std::option::Option::take(&mut self.recorder); + std::option::Option::map(recorder, |recorder| { + #crate_::ProofRecorder::::drain_storage_proof(recorder) + }) + } - fn into_storage_changes( - &self, - backend: &Self::StateBackend, - parent_hash: Block::Hash, - ) -> core::result::Result< - #crate_::StorageChanges, + fn into_storage_changes>>( + &self, + backend: &B, + parent_hash: Block::Hash, + ) -> core::result::Result< + #crate_::StorageChanges, String - > where Self: Sized { - let state_version = #crate_::CallApiAt::::runtime_version_at(self.call, std::clone::Clone::clone(&parent_hash)) - .map(|v| #crate_::RuntimeVersion::state_version(&v)) - .map_err(|e| format!("Failed to get state version: {}", e))?; - - #crate_::OverlayedChanges::into_storage_changes( - std::cell::RefCell::take(&self.changes), - backend, - core::cell::RefCell::take(&self.storage_transaction_cache), - state_version, - ) + > where Self: Sized { + let state_version = #crate_::CallApiAt::::runtime_version_at(self.call, 
std::clone::Clone::clone(&parent_hash)) + .map(|v| #crate_::RuntimeVersion::state_version(&v)) + .map_err(|e| format!("Failed to get state version: {}", e))?; + + #crate_::OverlayedChanges::drain_storage_changes( + &mut std::cell::RefCell::borrow_mut(&self.changes), + backend, + state_version, + ) + } + + fn set_call_context(&mut self, call_context: #crate_::CallContext) { + self.call_context = call_context; + } + + fn register_extension(&mut self, extension: E) { + std::cell::RefCell::borrow_mut(&self.extensions).register(extension); + } } - } - #[cfg(any(feature = "std", test))] - impl #crate_::ConstructRuntimeApi - for RuntimeApi - where - C: #crate_::CallApiAt + 'static, - { - type RuntimeApi = RuntimeApiImpl; - - fn construct_runtime_api<'a>( - call: &'a C, - ) -> #crate_::ApiRef<'a, Self::RuntimeApi> { - RuntimeApiImpl { - call: unsafe { std::mem::transmute(call) }, - commit_on_success: true.into(), - changes: std::default::Default::default(), - recorder: std::default::Default::default(), - storage_transaction_cache: std::default::Default::default(), - }.into() + impl #crate_::ConstructRuntimeApi + for RuntimeApi + where + C: #crate_::CallApiAt + 'static, + { + type RuntimeApi = RuntimeApiImpl; + + fn construct_runtime_api<'a>( + call: &'a C, + ) -> #crate_::ApiRef<'a, Self::RuntimeApi> { + RuntimeApiImpl { + call: unsafe { std::mem::transmute(call) }, + transaction_depth: 0.into(), + changes: std::default::Default::default(), + recorder: std::default::Default::default(), + call_context: #crate_::CallContext::Offchain, + extensions: std::default::Default::default(), + extensions_generated_for: std::default::Default::default(), + }.into() + } } - } - #[cfg(any(feature = "std", test))] - impl> RuntimeApiImpl { - fn commit_or_rollback(&self, commit: bool) { - let proof = "\ + impl> RuntimeApiImpl { + fn commit_or_rollback_transaction(&self, commit: bool) { + let proof = "\ We only close a transaction when we opened one ourself. 
Other parts of the runtime that make use of transactions (state-machine) also balance their transactions. The runtime cannot close client initiated transactions; qed"; - if *std::cell::RefCell::borrow(&self.commit_on_success) { + let res = if commit { let res = if let Some(recorder) = &self.recorder { #crate_::ProofRecorder::::commit_transaction(&recorder) @@ -381,18 +390,14 @@ fn generate_runtime_api_base_structures() -> Result { std::result::Result::expect(res, proof); } - } - fn start_transaction(&self) { - if !*std::cell::RefCell::borrow(&self.commit_on_success) { - return - } - - #crate_::OverlayedChanges::start_transaction( - &mut std::cell::RefCell::borrow_mut(&self.changes) - ); - if let Some(recorder) = &self.recorder { - #crate_::ProofRecorder::::start_transaction(&recorder); + fn start_transaction(&self) { + #crate_::OverlayedChanges::start_transaction( + &mut std::cell::RefCell::borrow_mut(&self.changes) + ); + if let Some(recorder) = &self.recorder { + #crate_::ProofRecorder::::start_transaction(&recorder); + } } } } @@ -482,11 +487,16 @@ impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> { fn __runtime_api_internal_call_api_at( &self, at: <__SrApiBlock__ as #crate_::BlockT>::Hash, - context: #crate_::ExecutionContext, params: std::vec::Vec, fn_name: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, ) -> std::result::Result, #crate_::ApiError> { - self.start_transaction(); + // If we are not already in a transaction, we should create a new transaction + // and then commit/roll it back at the end! 
+ let transaction_depth = *std::cell::RefCell::borrow(&self.transaction_depth); + + if transaction_depth == 0 { + self.start_transaction(); + } let res = (|| { let version = #crate_::CallApiAt::<__SrApiBlock__>::runtime_version_at( @@ -494,14 +504,33 @@ impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> { at, )?; + match &mut *std::cell::RefCell::borrow_mut(&self.extensions_generated_for) { + Some(generated_for) => { + if *generated_for != at { + return std::result::Result::Err( + #crate_::ApiError::UsingSameInstanceForDifferentBlocks + ) + } + }, + generated_for @ None => { + #crate_::CallApiAt::<__SrApiBlock__>::initialize_extensions( + self.call, + at, + &mut std::cell::RefCell::borrow_mut(&self.extensions), + )?; + + *generated_for = Some(at); + } + } + let params = #crate_::CallApiAtParams { at, function: (*fn_name)(version), arguments: params, overlayed_changes: &self.changes, - storage_transaction_cache: &self.storage_transaction_cache, - context, + call_context: self.call_context, recorder: &self.recorder, + extensions: &self.extensions, }; #crate_::CallApiAt::<__SrApiBlock__>::call_api_at( @@ -510,7 +539,9 @@ impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> { ) })(); - self.commit_or_rollback(std::result::Result::is_ok(&res)); + if transaction_depth == 0 { + self.commit_or_rollback_transaction(std::result::Result::is_ok(&res)); + } res } @@ -553,7 +584,7 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { where_clause.predicates.push(parse_quote! { RuntimeApiImplCall::StateBackend: - #crate_::StateBackend<#crate_::HashFor<__SrApiBlock__>> + #crate_::StateBackend<#crate_::HashingFor<__SrApiBlock__>> }); where_clause.predicates.push(parse_quote! { &'static RuntimeApiImplCall: Send }); @@ -571,10 +602,6 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { input.attrs = filter_cfg_attrs(&input.attrs); - // The implementation for the `RuntimeApiImpl` is only required when compiling with - // the feature `std` or `test`. 
- input.attrs.push(parse_quote!( #[cfg(any(feature = "std", test))] )); - fold::fold_item_impl(self, input) } } @@ -595,7 +622,10 @@ fn generate_api_impl_for_runtime_api(impls: &[ItemImpl]) -> Result result.push(processed_impl); } - Ok(quote!( #( #result )* )) + + let crate_ = generate_crate_access(); + + Ok(quote!( #crate_::std_enabled! { #( #result )* } )) } fn populate_runtime_api_versions( @@ -612,13 +642,14 @@ fn populate_runtime_api_versions( )); sections.push(quote!( - #( #attrs )* - const _: () = { - // All sections with the same name are going to be merged by concatenation. - #[cfg(not(feature = "std"))] - #[link_section = "runtime_apis"] - static SECTION_CONTENTS: [u8; 12] = #crate_access::serialize_runtime_api_info(#id, #version); - }; + #crate_access::std_disabled! { + #( #attrs )* + const _: () = { + // All sections with the same name are going to be merged by concatenation. + #[link_section = "runtime_apis"] + static SECTION_CONTENTS: [u8; 12] = #crate_access::serialize_runtime_api_info(#id, #version); + }; + } )); } @@ -687,7 +718,11 @@ fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { let runtime_api_versions = generate_runtime_api_versions(api_impls)?; let wasm_interface = generate_wasm_interface(api_impls)?; let api_impls_for_runtime_api = generate_api_impl_for_runtime_api(api_impls)?; - let runtime_metadata = generate_impl_runtime_metadata(api_impls)?; + + #[cfg(feature = "frame-metadata")] + let runtime_metadata = crate::runtime_metadata::generate_impl_runtime_metadata(api_impls)?; + #[cfg(not(feature = "frame-metadata"))] + let runtime_metadata = quote!(); let impl_ = quote!( #base_runtime_api diff --git a/primitives/api/proc-macro/src/lib.rs b/primitives/api/proc-macro/src/lib.rs index d34f4b7f9cf6a..06e148880e975 100644 --- a/primitives/api/proc-macro/src/lib.rs +++ b/primitives/api/proc-macro/src/lib.rs @@ -25,6 +25,7 @@ mod common; mod decl_runtime_apis; mod impl_runtime_apis; mod mock_impl_runtime_apis; +#[cfg(feature = 
"frame-metadata")] mod runtime_metadata; mod utils; diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index be8c8ca0f8527..c1339ff6621b3 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -66,8 +66,6 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result for #self_ty { - type StateBackend = #crate_::InMemoryBackend<#crate_::HashFor<#block_type>>; - fn execute_in_transaction #crate_::TransactionOutcome, R>( &self, call: F, @@ -111,23 +109,30 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result>>( &self, - _: &Self::StateBackend, + _: &B, _: <#block_type as #crate_::BlockT>::Hash, ) -> std::result::Result< - #crate_::StorageChanges, + #crate_::StorageChanges<#block_type>, String > where Self: Sized { unimplemented!("`into_storage_changes` not implemented for runtime api mocks") } + + fn set_call_context(&mut self, _: #crate_::CallContext) { + unimplemented!("`set_call_context` not implemented for runtime api mocks") + } + + fn register_extension(&mut self, _: E) { + unimplemented!("`register_extension` not implemented for runtime api mocks") + } } impl #crate_::Core<#block_type> for #self_ty { fn __runtime_api_internal_call_api_at( &self, _: <#block_type as #crate_::BlockT>::Hash, - _: #crate_::ExecutionContext, _: std::vec::Vec, _: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, ) -> std::result::Result, #crate_::ApiError> { @@ -141,14 +146,6 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result::Hash, - _: #crate_::ExecutionContext, - ) -> std::result::Result<#crate_::RuntimeVersion, #crate_::ApiError> { - unimplemented!("`Core::version` not implemented for runtime api mocks") - } - fn execute_block( &self, _: <#block_type as #crate_::BlockT>::Hash, @@ -157,15 +154,6 @@ fn implement_common_api_traits(block_type: TypePath, 
self_ty: Type) -> Result::Hash, - _: #crate_::ExecutionContext, - _: #block_type, - ) -> std::result::Result<(), #crate_::ApiError> { - unimplemented!("`Core::execute_block` not implemented for runtime api mocks") - } - fn initialize_block( &self, _: <#block_type as #crate_::BlockT>::Hash, @@ -173,15 +161,6 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result std::result::Result<(), #crate_::ApiError> { unimplemented!("`Core::initialize_block` not implemented for runtime api mocks") } - - fn initialize_block_with_context( - &self, - _: <#block_type as #crate_::BlockT>::Hash, - _: #crate_::ExecutionContext, - _: &<#block_type as #crate_::BlockT>::Header, - ) -> std::result::Result<(), #crate_::ApiError> { - unimplemented!("`Core::initialize_block` not implemented for runtime api mocks") - } } )) } @@ -255,26 +234,12 @@ impl<'a> FoldRuntimeApiImpl<'a> { let crate_ = generate_crate_access(); - // We also need to overwrite all the `_with_context` methods. To do this, - // we clone all methods and add them again with the new name plus one more argument. - impl_item.items.extend(impl_item.items.clone().into_iter().filter_map(|i| { - if let syn::ImplItem::Fn(mut m) = i { - m.sig.ident = quote::format_ident!("{}_with_context", m.sig.ident); - m.sig.inputs.insert(2, parse_quote!( _: #crate_::ExecutionContext )); - - Some(m.into()) - } else { - None - } - })); - let block_type = self.block_type; impl_item.items.push(parse_quote! 
{ fn __runtime_api_internal_call_api_at( &self, _: <#block_type as #crate_::BlockT>::Hash, - _: #crate_::ExecutionContext, _: std::vec::Vec, _: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, ) -> std::result::Result, #crate_::ApiError> { diff --git a/primitives/api/proc-macro/src/runtime_metadata.rs b/primitives/api/proc-macro/src/runtime_metadata.rs index ae78fb52dbd5a..41849401291e6 100644 --- a/primitives/api/proc-macro/src/runtime_metadata.rs +++ b/primitives/api/proc-macro/src/runtime_metadata.rs @@ -30,13 +30,13 @@ use crate::{ /// Get the type parameter argument without lifetime or mutability /// of a runtime metadata function. /// -/// In the following example, both the `AccountId` and `Index` generic +/// In the following example, both the `AccountId` and `Nonce` generic /// type parameters must implement `scale_info::TypeInfo` because they /// are added into the metadata using `scale_info::meta_type`. /// /// ```ignore -/// trait ExampleAccountNonceApi { -/// fn account_nonce<'a>(account: &'a AccountId) -> Index; +/// trait ExampleAccountNonceApi { +/// fn account_nonce<'a>(account: &'a AccountId) -> Nonce; /// } /// ``` /// @@ -88,9 +88,7 @@ pub fn generate_decl_runtime_metadata(decl: &ItemTrait) -> TokenStream2 { let mut where_clause = Vec::new(); for item in &decl.items { // Collect metadata for methods only. - let syn::TraitItem::Fn(method) = item else { - continue - }; + let syn::TraitItem::Fn(method) = item else { continue }; // Collect metadata only for the latest methods. let is_changed_in = @@ -103,9 +101,7 @@ pub fn generate_decl_runtime_metadata(decl: &ItemTrait) -> TokenStream2 { let signature = &method.sig; for input in &signature.inputs { // Exclude `self` from metadata collection. 
- let syn::FnArg::Typed(typed) = input else { - continue - }; + let syn::FnArg::Typed(typed) = input else { continue }; let pat = &typed.pat; let name = quote!(#pat).to_string(); @@ -153,24 +149,25 @@ pub fn generate_decl_runtime_metadata(decl: &ItemTrait) -> TokenStream2 { // The trait generics where already extended with `Block: BlockT`. let mut generics = decl.generics.clone(); for generic_param in generics.params.iter_mut() { - let syn::GenericParam::Type(ty) = generic_param else { - continue - }; + let syn::GenericParam::Type(ty) = generic_param else { continue }; // Default type parameters are not allowed in functions. ty.eq_token = None; ty.default = None; } - let where_clause = where_clause - .iter() - .map(|ty| quote!(#ty: #crate_::scale_info::TypeInfo + 'static)); + where_clause + .into_iter() + .map(|ty| parse_quote!(#ty: #crate_::scale_info::TypeInfo + 'static)) + .for_each(|w| generics.make_where_clause().predicates.push(w)); + + let (impl_generics, _, where_clause) = generics.split_for_impl(); quote!( #( #attrs )* #[inline(always)] - pub fn runtime_metadata #generics () -> #crate_::metadata_ir::RuntimeApiMetadataIR - where #( #where_clause, )* + pub fn runtime_metadata #impl_generics () -> #crate_::metadata_ir::RuntimeApiMetadataIR + #where_clause { #crate_::metadata_ir::RuntimeApiMetadataIR { name: #trait_name, diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs index 6716be142febc..c9389154bbf40 100644 --- a/primitives/api/proc-macro/src/utils.rs +++ b/primitives/api/proc-macro/src/utils.rs @@ -22,7 +22,7 @@ use syn::{ ImplItem, ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath, }; -use quote::{format_ident, quote, ToTokens}; +use quote::{format_ident, quote}; use proc_macro_crate::{crate_name, FoundCrate}; @@ -259,13 +259,14 @@ pub fn versioned_trait_name(trait_ident: &Ident, version: u64) -> Ident { } /// Extract the documentation from the provided attributes. 
+#[cfg(feature = "frame-metadata")] pub fn get_doc_literals(attrs: &[syn::Attribute]) -> Vec { + use quote::ToTokens; + attrs .iter() .filter_map(|attr| { - let syn::Meta::NameValue(meta) = &attr.meta else { - return None - }; + let syn::Meta::NameValue(meta) = &attr.meta else { return None }; let Ok(lit) = syn::parse2::(meta.value.to_token_stream()) else { unreachable!("non-lit doc attribute values do not exist"); }; @@ -275,6 +276,7 @@ pub fn get_doc_literals(attrs: &[syn::Attribute]) -> Vec { } /// Filters all attributes except the cfg ones. +#[cfg(feature = "frame-metadata")] pub fn filter_cfg_attributes(attrs: &[syn::Attribute]) -> Vec { attrs.iter().filter(|a| a.path().is_ident("cfg")).cloned().collect() } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 02770280f7b90..e575f6b9bbff5 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -78,12 +78,19 @@ pub use hash_db::Hasher; #[doc(hidden)] pub use scale_info; #[doc(hidden)] +pub use sp_core::offchain; +#[doc(hidden)] #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; +#[doc(hidden)] +#[cfg(feature = "std")] +pub use sp_core::traits::CallContext; use sp_core::OpaqueMetadata; #[doc(hidden)] -pub use sp_core::{offchain, ExecutionContext}; +#[cfg(feature = "std")] +pub use sp_externalities::{Extension, Extensions}; #[doc(hidden)] +#[cfg(feature = "frame-metadata")] pub use sp_metadata_ir::{self as metadata_ir, frame_metadata as metadata}; #[doc(hidden)] #[cfg(feature = "std")] @@ -91,10 +98,7 @@ pub use sp_runtime::StateVersion; #[doc(hidden)] pub use sp_runtime::{ generic::BlockId, - traits::{ - Block as BlockT, GetNodeBlockType, GetRuntimeBlockType, Hash as HashT, HashFor, - Header as HeaderT, NumberFor, - }, + traits::{Block as BlockT, Hash as HashT, HashingFor, Header as HeaderT, NumberFor}, transaction_validity::TransactionValidity, RuntimeString, TransactionOutcome, }; @@ -263,15 +267,12 @@ pub use 
sp_api_proc_macro::decl_runtime_apis; /// ```rust /// use sp_version::create_runtime_str; /// # -/// # use sp_runtime::traits::{GetNodeBlockType, Block as BlockT}; +/// # use sp_runtime::traits::Block as BlockT; /// # use sp_test_primitives::Block; /// # -/// # /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` -/// # /// trait are done by the `construct_runtime!` macro in a real runtime. +/// # /// The declaration of the `Runtime` type is done by the `construct_runtime!` macro +/// # /// in a real runtime. /// # pub struct Runtime {} -/// # impl GetNodeBlockType for Runtime { -/// # type NodeBlock = Block; -/// # } /// # /// # sp_api::decl_runtime_apis! { /// # /// Declare the api trait. @@ -453,30 +454,10 @@ pub use sp_api_proc_macro::mock_impl_runtime_apis; /// A type that records all accessed trie nodes and generates a proof out of it. #[cfg(feature = "std")] -pub type ProofRecorder = sp_trie::recorder::Recorder>; - -/// A type that is used as cache for the storage transactions. -#[cfg(feature = "std")] -pub type StorageTransactionCache = sp_state_machine::StorageTransactionCache< - >>::Transaction, - HashFor, ->; - -#[cfg(feature = "std")] -pub type StorageChanges = sp_state_machine::StorageChanges< - >>::Transaction, - HashFor, ->; +pub type ProofRecorder = sp_trie::recorder::Recorder>; -/// Extract the state backend type for a type that implements `ProvideRuntimeApi`. #[cfg(feature = "std")] -pub type StateBackendFor = - <

>::Api as ApiExt>::StateBackend; - -/// Extract the state backend transaction type for a type that implements `ProvideRuntimeApi`. -#[cfg(feature = "std")] -pub type TransactionFor = - as StateBackend>>::Transaction; +pub type StorageChanges = sp_state_machine::StorageChanges>; /// Something that can be constructed to a runtime api. #[cfg(feature = "std")] @@ -523,14 +504,13 @@ pub enum ApiError { Application(#[from] Box), #[error("Api called for an unknown Block: {0}")] UnknownBlock(String), + #[error("Using the same api instance to call into multiple independent blocks.")] + UsingSameInstanceForDifferentBlocks, } /// Extends the runtime api implementation with some common functionality. #[cfg(feature = "std")] pub trait ApiExt { - /// The state backend that is used to store the block states. - type StateBackend: StateBackend>; - /// Execute the given closure inside a new transaction. /// /// Depending on the outcome of the closure, the transaction is committed or rolled-back. @@ -579,18 +559,24 @@ pub trait ApiExt { /// api functions. /// /// After executing this function, all collected changes are reset. - fn into_storage_changes( + fn into_storage_changes>>( &self, - backend: &Self::StateBackend, + backend: &B, parent_hash: Block::Hash, - ) -> Result, String> + ) -> Result, String> where Self: Sized; + + /// Set the [`CallContext`] to be used by the runtime api calls done by this instance. + fn set_call_context(&mut self, call_context: CallContext); + + /// Register an [`Extension`] that will be accessible while executing a runtime api call. + fn register_extension(&mut self, extension: E); } /// Parameters for [`CallApiAt::call_api_at`]. #[cfg(feature = "std")] -pub struct CallApiAtParams<'a, Block: BlockT, Backend: StateBackend>> { +pub struct CallApiAtParams<'a, Block: BlockT> { /// The block id that determines the state that should be setup when calling the function. pub at: Block::Hash, /// The name of the function that should be called. 
@@ -598,33 +584,37 @@ pub struct CallApiAtParams<'a, Block: BlockT, Backend: StateBackend, /// The overlayed changes that are on top of the state. - pub overlayed_changes: &'a RefCell, - /// The cache for storage transactions. - pub storage_transaction_cache: &'a RefCell>, - /// The context this function is executed in. - pub context: ExecutionContext, + pub overlayed_changes: &'a RefCell>>, + /// The call context of this call. + pub call_context: CallContext, /// The optional proof recorder for recording storage accesses. pub recorder: &'a Option>, + /// The extensions that should be used for this call. + pub extensions: &'a RefCell, } /// Something that can call into the an api at a given block. #[cfg(feature = "std")] pub trait CallApiAt { /// The state backend that is used to store the block states. - type StateBackend: StateBackend> + AsTrieBackend>; + type StateBackend: StateBackend> + AsTrieBackend>; /// Calls the given api function with the given encoded arguments at the given block and returns /// the encoded result. - fn call_api_at( - &self, - params: CallApiAtParams, - ) -> Result, ApiError>; + fn call_api_at(&self, params: CallApiAtParams) -> Result, ApiError>; /// Returns the runtime version at the given block. fn runtime_version_at(&self, at_hash: Block::Hash) -> Result; /// Get the state `at` the given block. fn state_at(&self, at: Block::Hash) -> Result; + + /// Initialize the `extensions` for the given block `at` by using the global extensions factory. + fn initialize_extensions( + &self, + at: Block::Hash, + extensions: &mut Extensions, + ) -> Result<(), ApiError>; } /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime. @@ -750,3 +740,6 @@ decl_runtime_apis! 
{ fn metadata_versions() -> sp_std::vec::Vec; } } + +sp_core::generate_feature_enabled_macro!(std_enabled, feature = "std", $); +sp_core::generate_feature_enabled_macro!(std_disabled, not(feature = "std"), $); diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index 5b6c144ef3f6b..87c5eb6199efe 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -14,27 +14,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-api = { version = "4.0.0-dev", path = "../" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sp-version = { version = "5.0.0", path = "../../version" } -sp-tracing = { version = "6.0.0", path = "../../tracing" } -sp-runtime = { version = "7.0.0", path = "../../runtime" } +sp-version = { version = "22.0.0", path = "../../version" } +sp-tracing = { version = "10.0.0", path = "../../tracing" } +sp-runtime = { version = "24.0.0", path = "../../runtime" } sp-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" } -codec = { package = "parity-scale-codec", version = "3.2.2" } -sp-state-machine = { version = "0.13.0", path = "../../state-machine" } +codec = { package = "parity-scale-codec", version = "3.6.1" } +sp-state-machine = { version = "0.28.0", path = "../../state-machine" } trybuild = "1.0.74" rustversion = "1.0.6" +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } [dev-dependencies] criterion = "0.4.0" futures = "0.3.21" log = "0.4.17" -sp-core = { version = "7.0.0", path = "../../core" } +sp-core = { version = "21.0.0", path = "../../core" } +static_assertions = "1.1.0" [[bench]] name = "bench" harness = false - -# We only need this to generate the correct code. 
-[features] -default = [ "std" ] -std = [] diff --git a/primitives/api/test/benches/bench.rs b/primitives/api/test/benches/bench.rs index 88ebdbc6134aa..45bea08af6ded 100644 --- a/primitives/api/test/benches/bench.rs +++ b/primitives/api/test/benches/bench.rs @@ -17,7 +17,6 @@ use criterion::{criterion_group, criterion_main, Criterion}; use sp_api::ProvideRuntimeApi; -use sp_state_machine::ExecutionStrategy; use substrate_test_runtime_client::{ runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; @@ -56,17 +55,13 @@ fn sp_api_benchmark(c: &mut Criterion) { }); c.bench_function("calling function by function pointer in wasm", |b| { - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::AlwaysWasm) - .build(); + let client = TestClientBuilder::new().build(); let best_hash = client.chain_info().best_hash; b.iter(|| client.runtime_api().benchmark_indirect_call(best_hash).unwrap()) }); - c.bench_function("calling function in wasm", |b| { - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::AlwaysWasm) - .build(); + c.bench_function("calling function", |b| { + let client = TestClientBuilder::new().build(); let best_hash = client.chain_info().best_hash; b.iter(|| client.runtime_api().benchmark_direct_call(best_hash).unwrap()) }); diff --git a/primitives/api/test/tests/decl_and_impl.rs b/primitives/api/test/tests/decl_and_impl.rs index f07adbfa709b5..274f80bd1b465 100644 --- a/primitives/api/test/tests/decl_and_impl.rs +++ b/primitives/api/test/tests/decl_and_impl.rs @@ -18,16 +18,13 @@ use sp_api::{ decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, ApiError, ApiExt, RuntimeApiInfo, }; -use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use sp_runtime::traits::Block as BlockT; use substrate_test_runtime_client::runtime::{Block, Hash}; -/// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` -/// trait are done by the 
`construct_runtime!` macro in a real runtime. +/// The declaration of the `Runtime` type is done by the `construct_runtime!` macro in a real +/// runtime. pub struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} decl_runtime_apis! { pub trait Api { diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index a591bc0b77938..353be73dcccda 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -15,15 +15,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sp_api::{Core, ProvideRuntimeApi}; -use sp_runtime::traits::{HashFor, Header as HeaderT}; -use sp_state_machine::{ - create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionStrategy, +use std::panic::UnwindSafe; + +use sp_api::{ApiExt, Core, ProvideRuntimeApi}; +use sp_runtime::{ + traits::{HashingFor, Header as HeaderT}, + TransactionOutcome, }; +use sp_state_machine::{create_proof_check_backend, execution_proof_check_on_trie_backend}; + use substrate_test_runtime_client::{ prelude::*, runtime::{Block, Header, TestAPI, Transfer}, - DefaultTestClientBuilderExt, TestClientBuilder, + DefaultTestClientBuilderExt, TestClient, TestClientBuilder, }; use codec::Encode; @@ -31,29 +35,18 @@ use sc_block_builder::BlockBuilderProvider; use sp_consensus::SelectChain; use substrate_test_runtime_client::sc_executor::WasmExecutor; -fn calling_function_with_strat(strat: ExecutionStrategy) { - let client = TestClientBuilder::new().set_execution_strategy(strat).build(); +#[test] +fn calling_runtime_function() { + let client = TestClientBuilder::new().build(); let runtime_api = client.runtime_api(); let best_hash = client.chain_info().best_hash; assert_eq!(runtime_api.benchmark_add_one(best_hash, &1).unwrap(), 2); } -#[test] -fn calling_native_runtime_function() { - 
calling_function_with_strat(ExecutionStrategy::NativeWhenPossible); -} - -#[test] -fn calling_wasm_runtime_function() { - calling_function_with_strat(ExecutionStrategy::AlwaysWasm); -} - #[test] fn calling_native_runtime_signature_changed_function() { - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) - .build(); + let client = TestClientBuilder::new().build(); let runtime_api = client.runtime_api(); let best_hash = client.chain_info().best_hash; @@ -62,9 +55,7 @@ fn calling_native_runtime_signature_changed_function() { #[test] fn use_trie_function() { - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::AlwaysWasm) - .build(); + let client = TestClientBuilder::new().build(); let runtime_api = client.runtime_api(); let best_hash = client.chain_info().best_hash; assert_eq!(runtime_api.use_trie(best_hash).unwrap(), 2); @@ -72,7 +63,7 @@ fn use_trie_function() { #[test] fn initialize_block_works() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); + let client = TestClientBuilder::new().build(); let runtime_api = client.runtime_api(); let best_hash = client.chain_info().best_hash; runtime_api @@ -92,9 +83,7 @@ fn initialize_block_works() { #[test] fn record_proof_works() { - let (client, longest_chain) = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::Both) - .build_with_longest_chain(); + let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); let storage_root = *futures::executor::block_on(longest_chain.best_chain()).unwrap().state_root(); @@ -122,7 +111,7 @@ fn record_proof_works() { builder.push(transaction.clone()).unwrap(); let (block, _, proof) = builder.build().expect("Bake block").into_inner(); - let backend = create_proof_check_backend::>( + let backend = create_proof_check_backend::>( storage_root, proof.expect("Proof was generated"), ) @@ -146,7 +135,7 @@ fn 
record_proof_works() { #[test] fn call_runtime_api_with_multiple_arguments() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); + let client = TestClientBuilder::new().build(); let data = vec![1, 2, 4, 5, 6, 7, 8, 8, 10, 12]; let best_hash = client.chain_info().best_hash; @@ -161,8 +150,7 @@ fn disable_logging_works() { if std::env::var("RUN_TEST").is_ok() { sp_tracing::try_init_simple(); - let mut builder = - TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm); + let mut builder = TestClientBuilder::new(); builder.genesis_init_mut().set_wasm_code( substrate_test_runtime_client::runtime::wasm_binary_logging_disabled_unwrap().to_vec(), ); @@ -187,3 +175,46 @@ fn disable_logging_works() { assert!(output.contains("Logging from native works")); } } + +// Certain logic like the transaction handling is not unwind safe. +// +// Ensure that the type is not unwind safe! +static_assertions::assert_not_impl_any!(>::Api: UnwindSafe); + +#[test] +fn ensure_transactional_works() { + const KEY: &[u8] = b"test"; + + let client = TestClientBuilder::new().build(); + let best_hash = client.chain_info().best_hash; + + let runtime_api = client.runtime_api(); + runtime_api.execute_in_transaction(|api| { + api.write_key_value(best_hash, KEY.to_vec(), vec![1, 2, 3], false).unwrap(); + + api.execute_in_transaction(|api| { + api.write_key_value(best_hash, KEY.to_vec(), vec![1, 2, 3, 4], false).unwrap(); + + TransactionOutcome::Commit(()) + }); + + TransactionOutcome::Commit(()) + }); + + let changes = runtime_api + .into_storage_changes(&client.state_at(best_hash).unwrap(), best_hash) + .unwrap(); + assert_eq!(changes.main_storage_changes[0].1, Some(vec![1, 2, 3, 4])); + + let runtime_api = client.runtime_api(); + runtime_api.execute_in_transaction(|api| { + assert!(api.write_key_value(best_hash, KEY.to_vec(), vec![1, 2, 3], true).is_err()); + + TransactionOutcome::Commit(()) + }); + + let changes = runtime_api + 
.into_storage_changes(&client.state_at(best_hash).unwrap(), best_hash) + .unwrap(); + assert_eq!(changes.main_storage_changes[0].1, Some(vec![1, 2, 3])); +} diff --git a/primitives/api/test/tests/ui/changed_in_no_default_method.rs b/primitives/api/test/tests/ui/changed_in_no_default_method.rs index 6af183a4cde91..a0bb4e2830ca7 100644 --- a/primitives/api/test/tests/ui/changed_in_no_default_method.rs +++ b/primitives/api/test/tests/ui/changed_in_no_default_method.rs @@ -1,12 +1,6 @@ -use sp_runtime::traits::GetNodeBlockType; -use substrate_test_runtime_client::runtime::Block; - -/// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` -/// trait are done by the `construct_runtime!` macro in a real runtime. +/// The declaration of the `Runtime` type is done by the `construct_runtime!` macro in a real +/// runtime. struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} sp_api::decl_runtime_apis! { #[api_version(2)] diff --git a/primitives/api/test/tests/ui/changed_in_no_default_method.stderr b/primitives/api/test/tests/ui/changed_in_no_default_method.stderr index 096b1091e6f41..2140703a5d2c2 100644 --- a/primitives/api/test/tests/ui/changed_in_no_default_method.stderr +++ b/primitives/api/test/tests/ui/changed_in_no_default_method.stderr @@ -1,6 +1,6 @@ error: There is no 'default' method with this name (without `changed_in` attribute). The 'default' method is used to call into the latest implementation. 
- --> tests/ui/changed_in_no_default_method.rs:15:6 - | -15 | fn test(data: u64); - | ^^^^ + --> tests/ui/changed_in_no_default_method.rs:9:6 + | +9 | fn test(data: u64); + | ^^^^ diff --git a/primitives/api/test/tests/ui/changed_in_unknown_version.rs b/primitives/api/test/tests/ui/changed_in_unknown_version.rs index 151f3e5f4d637..164b91d19422c 100644 --- a/primitives/api/test/tests/ui/changed_in_unknown_version.rs +++ b/primitives/api/test/tests/ui/changed_in_unknown_version.rs @@ -1,12 +1,6 @@ -use sp_runtime::traits::GetNodeBlockType; -use substrate_test_runtime_client::runtime::Block; - -/// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` -/// trait are done by the `construct_runtime!` macro in a real runtime. +/// The declaration of the `Runtime` type is done by the `construct_runtime!` macro in a real +/// runtime. struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} sp_api::decl_runtime_apis! { pub trait Api { diff --git a/primitives/api/test/tests/ui/changed_in_unknown_version.stderr b/primitives/api/test/tests/ui/changed_in_unknown_version.stderr index cf03ee4530ab7..d4a03bab552e9 100644 --- a/primitives/api/test/tests/ui/changed_in_unknown_version.stderr +++ b/primitives/api/test/tests/ui/changed_in_unknown_version.stderr @@ -1,5 +1,5 @@ error: `changed_in` version can not be greater than the `api_version` - --> $DIR/changed_in_unknown_version.rs:14:3 - | -14 | fn test(data: u64); - | ^^ + --> tests/ui/changed_in_unknown_version.rs:8:3 + | +8 | fn test(data: u64); + | ^^ diff --git a/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.rs b/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.rs index 93343fb72ab5d..68d84d97fa8e2 100644 --- a/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.rs +++ b/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.rs @@ -1,12 +1,6 @@ -use sp_runtime::traits::GetNodeBlockType; -use 
substrate_test_runtime_client::runtime::Block; - -/// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` -/// trait are done by the `construct_runtime!` macro in a real runtime. +/// The declaration of the `Runtime` type is done by the `construct_runtime!` macro in a real +/// runtime. struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} sp_api::decl_runtime_apis! { pub trait Api { diff --git a/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.stderr index bf201e8b55a78..96ec09a185544 100644 --- a/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.stderr @@ -1,7 +1,7 @@ error: No api implementation given! - --> $DIR/empty_impl_runtime_apis_call.rs:17:1 + --> tests/ui/empty_impl_runtime_apis_call.rs:11:1 | -17 | sp_api::impl_runtime_apis! {} +11 | sp_api::impl_runtime_apis! {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs b/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs index 19dfdda836236..32501be7855c6 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs @@ -1,12 +1,9 @@ -use sp_runtime::traits::{GetNodeBlockType, Block as BlockT}; +use sp_runtime::traits::Block as BlockT; use substrate_test_runtime_client::runtime::Block; -/// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` -/// trait are done by the `construct_runtime!` macro in a real runtime. +/// The declaration of the `Runtime` type is done by the `construct_runtime!` macro in a real +/// runtime. 
struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} sp_api::decl_runtime_apis! { pub trait Api { diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index ef82582079211..2324be85be4f8 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -1,35 +1,35 @@ error[E0053]: method `test` has an incompatible type for trait - --> tests/ui/impl_incorrect_method_signature.rs:19:17 + --> tests/ui/impl_incorrect_method_signature.rs:16:17 | -19 | fn test(data: String) {} +16 | fn test(data: String) {} | ^^^^^^ | | | expected `u64`, found `std::string::String` | help: change the parameter type to match the trait: `u64` | note: type in trait - --> tests/ui/impl_incorrect_method_signature.rs:13:17 + --> tests/ui/impl_incorrect_method_signature.rs:10:17 | -13 | fn test(data: u64); +10 | fn test(data: u64); | ^^^ = note: expected signature `fn(u64)` found signature `fn(std::string::String)` error[E0308]: mismatched types - --> tests/ui/impl_incorrect_method_signature.rs:19:11 + --> tests/ui/impl_incorrect_method_signature.rs:16:11 | -17 | / sp_api::impl_runtime_apis! { -18 | | impl self::Api for Runtime { -19 | | fn test(data: String) {} +14 | / sp_api::impl_runtime_apis! { +15 | | impl self::Api for Runtime { +16 | | fn test(data: String) {} | | ^^^^ expected `u64`, found `String` -20 | | } +17 | | } ... 
| -32 | | } -33 | | } +29 | | } +30 | | } | |_- arguments to this function are incorrect | note: associated function defined here - --> tests/ui/impl_incorrect_method_signature.rs:13:6 + --> tests/ui/impl_incorrect_method_signature.rs:10:6 | -13 | fn test(data: u64); +10 | fn test(data: u64); | ^^^^ diff --git a/primitives/api/test/tests/ui/impl_missing_version.rs b/primitives/api/test/tests/ui/impl_missing_version.rs index 63e0599622ac9..8fd40a400922f 100644 --- a/primitives/api/test/tests/ui/impl_missing_version.rs +++ b/primitives/api/test/tests/ui/impl_missing_version.rs @@ -1,10 +1,7 @@ -use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use sp_runtime::traits::Block as BlockT; use substrate_test_runtime_client::runtime::Block; struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} sp_api::decl_runtime_apis! { #[api_version(2)] diff --git a/primitives/api/test/tests/ui/impl_missing_version.stderr b/primitives/api/test/tests/ui/impl_missing_version.stderr index b8ecc466c7fcd..770543aa8875d 100644 --- a/primitives/api/test/tests/ui/impl_missing_version.stderr +++ b/primitives/api/test/tests/ui/impl_missing_version.stderr @@ -1,14 +1,8 @@ -error[E0433]: failed to resolve: could not find `ApiV4` in `runtime_decl_for_api` - --> tests/ui/impl_missing_version.rs:21:13 - | -21 | impl self::Api for Runtime { - | ^^^ could not find `ApiV4` in `runtime_decl_for_api` - error[E0405]: cannot find trait `ApiV4` in module `self::runtime_decl_for_api` - --> tests/ui/impl_missing_version.rs:21:13 + --> tests/ui/impl_missing_version.rs:18:13 | -11 | pub trait Api { +8 | pub trait Api { | ------------- similarly named trait `ApiV2` defined here ... 
-21 | impl self::Api for Runtime { +18 | impl self::Api for Runtime { | ^^^ help: a trait with a similar name exists: `ApiV2` diff --git a/primitives/api/test/tests/ui/impl_two_traits_with_same_name.rs b/primitives/api/test/tests/ui/impl_two_traits_with_same_name.rs index 76555a825dc84..cb8f2f493d741 100644 --- a/primitives/api/test/tests/ui/impl_two_traits_with_same_name.rs +++ b/primitives/api/test/tests/ui/impl_two_traits_with_same_name.rs @@ -1,12 +1,6 @@ -use sp_runtime::traits::GetNodeBlockType; -use substrate_test_runtime_client::runtime::Block; - -/// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` -/// trait are done by the `construct_runtime!` macro in a real runtime. +/// The declaration of the `Runtime` type is done by the `construct_runtime!` macro in a real +/// runtime. struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} sp_api::decl_runtime_apis! { pub trait Api { diff --git a/primitives/api/test/tests/ui/impl_two_traits_with_same_name.stderr b/primitives/api/test/tests/ui/impl_two_traits_with_same_name.stderr index 17ee56d409a66..a41f59f36b1f0 100644 --- a/primitives/api/test/tests/ui/impl_two_traits_with_same_name.stderr +++ b/primitives/api/test/tests/ui/impl_two_traits_with_same_name.stderr @@ -1,5 +1,5 @@ error: Two traits with the same name detected! The trait name is used to generate its ID. Please rename one trait at the declaration! 
- --> $DIR/impl_two_traits_with_same_name.rs:30:15 + --> tests/ui/impl_two_traits_with_same_name.rs:24:15 | -30 | impl second::Api for Runtime { +24 | impl second::Api for Runtime { | ^^^ diff --git a/primitives/api/test/tests/ui/missing_block_generic_parameter.rs b/primitives/api/test/tests/ui/missing_block_generic_parameter.rs index e194fdbf4ab65..b69505bfeb098 100644 --- a/primitives/api/test/tests/ui/missing_block_generic_parameter.rs +++ b/primitives/api/test/tests/ui/missing_block_generic_parameter.rs @@ -1,12 +1,6 @@ -use sp_runtime::traits::GetNodeBlockType; -use substrate_test_runtime_client::runtime::Block; - -/// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` -/// trait are done by the `construct_runtime!` macro in a real runtime. +/// The declaration of the `Runtime` type is done by the `construct_runtime!` macro in a real +/// runtime. struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} sp_api::decl_runtime_apis! { pub trait Api { diff --git a/primitives/api/test/tests/ui/missing_block_generic_parameter.stderr b/primitives/api/test/tests/ui/missing_block_generic_parameter.stderr index d626eda2496f4..5dc2b993bb1a7 100644 --- a/primitives/api/test/tests/ui/missing_block_generic_parameter.stderr +++ b/primitives/api/test/tests/ui/missing_block_generic_parameter.stderr @@ -1,5 +1,5 @@ error: Missing `Block` generic parameter. 
- --> $DIR/missing_block_generic_parameter.rs:18:13 + --> tests/ui/missing_block_generic_parameter.rs:12:13 | -18 | impl self::Api for Runtime { +12 | impl self::Api for Runtime { | ^^^ diff --git a/primitives/api/test/tests/ui/missing_path_for_trait.rs b/primitives/api/test/tests/ui/missing_path_for_trait.rs index d7540ce88a8a1..e47bca1c3f6ce 100644 --- a/primitives/api/test/tests/ui/missing_path_for_trait.rs +++ b/primitives/api/test/tests/ui/missing_path_for_trait.rs @@ -1,12 +1,6 @@ -use sp_runtime::traits::GetNodeBlockType; -use substrate_test_runtime_client::runtime::Block; - -/// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` -/// trait are done by the `construct_runtime!` macro in a real runtime. +/// The declaration of the `Runtime` type is done by the `construct_runtime!` macro in a real +/// runtime. struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} sp_api::decl_runtime_apis! { pub trait Api { diff --git a/primitives/api/test/tests/ui/missing_path_for_trait.stderr b/primitives/api/test/tests/ui/missing_path_for_trait.stderr index 729ff0bad18d8..cca993501979e 100644 --- a/primitives/api/test/tests/ui/missing_path_for_trait.stderr +++ b/primitives/api/test/tests/ui/missing_path_for_trait.stderr @@ -1,5 +1,5 @@ error: The implemented trait has to be referenced with a path, e.g. `impl client::Core for Runtime`. 
- --> $DIR/missing_path_for_trait.rs:18:7 + --> tests/ui/missing_path_for_trait.rs:12:7 | -18 | impl Api for Runtime { +12 | impl Api for Runtime { | ^^^ diff --git a/primitives/api/test/tests/ui/missing_versioned_method.rs b/primitives/api/test/tests/ui/missing_versioned_method.rs index d973a94c2101d..919cef055fe62 100644 --- a/primitives/api/test/tests/ui/missing_versioned_method.rs +++ b/primitives/api/test/tests/ui/missing_versioned_method.rs @@ -1,10 +1,7 @@ -use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use sp_runtime::traits::Block as BlockT; use substrate_test_runtime_client::runtime::Block; struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} sp_api::decl_runtime_apis! { #[api_version(2)] diff --git a/primitives/api/test/tests/ui/missing_versioned_method.stderr b/primitives/api/test/tests/ui/missing_versioned_method.stderr index e3ace7979c27e..b88d903212df1 100644 --- a/primitives/api/test/tests/ui/missing_versioned_method.stderr +++ b/primitives/api/test/tests/ui/missing_versioned_method.stderr @@ -1,8 +1,8 @@ error[E0046]: not all trait items implemented, missing: `test3` - --> tests/ui/missing_versioned_method.rs:21:2 + --> tests/ui/missing_versioned_method.rs:18:2 | -15 | fn test3(); +12 | fn test3(); | ----------- `test3` from trait ... 
-21 | impl self::Api for Runtime { +18 | impl self::Api for Runtime { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `test3` in implementation diff --git a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs index 72358b99164d5..036bba417f57d 100644 --- a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs +++ b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs @@ -1,10 +1,7 @@ -use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use sp_runtime::traits::Block as BlockT; use substrate_test_runtime_client::runtime::Block; struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} sp_api::decl_runtime_apis! { #[api_version(2)] diff --git a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr index 7354fbd537fd7..4afa6856a5814 100644 --- a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr +++ b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr @@ -1,8 +1,8 @@ error[E0046]: not all trait items implemented, missing: `test3` - --> tests/ui/missing_versioned_method_multiple_vers.rs:23:2 + --> tests/ui/missing_versioned_method_multiple_vers.rs:20:2 | -15 | fn test3(); +12 | fn test3(); | ----------- `test3` from trait ... 
-23 | impl self::Api for Runtime { +20 | impl self::Api for Runtime { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `test3` in implementation diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/primitives/api/test/tests/ui/mock_only_self_reference.stderr index 430f63eee1660..f088e8f2de59d 100644 --- a/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -48,42 +48,3 @@ error[E0050]: method `test2` has 2 parameters but the declaration in trait `Api: | |_^ expected 3 parameters, found 2 | = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0050]: method `test_with_context` has 3 parameters but the declaration in trait `Api::test_with_context` has 4 - --> tests/ui/mock_only_self_reference.rs:12:1 - | -3 | / sp_api::decl_runtime_apis! { -4 | | pub trait Api { -5 | | fn test(data: u64); - | |_________________________- trait requires 4 parameters -... -12 | / sp_api::mock_impl_runtime_apis! { -13 | | impl Api for MockApi { -14 | | fn test(self, data: u64) {} -15 | | -16 | | fn test2(&mut self, data: u64) {} -17 | | } -18 | | } - | |_^ expected 4 parameters, found 3 - | - = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0050]: method `test2_with_context` has 3 parameters but the declaration in trait `Api::test2_with_context` has 4 - --> tests/ui/mock_only_self_reference.rs:12:1 - | -3 | / sp_api::decl_runtime_apis! { -4 | | pub trait Api { -5 | | fn test(data: u64); -6 | | fn test2(data: u64); - | |__________________________- trait requires 4 parameters -... -12 | / sp_api::mock_impl_runtime_apis! 
{ -13 | | impl Api for MockApi { -14 | | fn test(self, data: u64) {} -15 | | -16 | | fn test2(&mut self, data: u64) {} -17 | | } -18 | | } - | |_^ expected 4 parameters, found 3 - | - = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs b/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs new file mode 100644 index 0000000000000..b572a3bc30d5d --- /dev/null +++ b/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs @@ -0,0 +1,43 @@ +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_runtime::traits::Block as BlockT; +use substrate_test_runtime_client::runtime::Block; + +struct Runtime {} + +pub trait CustomTrait: Encode + Decode + TypeInfo {} + +#[derive(Encode, Decode, TypeInfo)] +pub struct SomeImpl; +impl CustomTrait for SomeImpl {} + +#[derive(Encode, Decode, TypeInfo)] +pub struct SomeOtherType(C); + +sp_api::decl_runtime_apis! { + pub trait Api where A: CustomTrait { + fn test() -> A; + fn test2() -> SomeOtherType; + } +} + +sp_api::impl_runtime_apis! 
{ + impl self::Api for Runtime { + fn test() -> SomeImpl { SomeImpl } + fn test2() -> SomeOtherType { SomeOtherType(SomeImpl) } + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/positive_cases/default_impls.rs b/primitives/api/test/tests/ui/positive_cases/default_impls.rs index 3434db1089f05..58192feb9ecac 100644 --- a/primitives/api/test/tests/ui/positive_cases/default_impls.rs +++ b/primitives/api/test/tests/ui/positive_cases/default_impls.rs @@ -1,10 +1,7 @@ -use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use sp_runtime::traits::Block as BlockT; use substrate_test_runtime_client::runtime::Block; struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} sp_api::decl_runtime_apis! { #[api_version(2)] diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs index a82fb9a1595db..14a8fa4d4e0b1 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs @@ -1,12 +1,9 @@ -use sp_runtime::traits::{GetNodeBlockType, Block as BlockT}; +use sp_runtime::traits::Block as BlockT; use substrate_test_runtime_client::runtime::Block; -/// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` -/// trait are done by the `construct_runtime!` macro in a real runtime. +/// The declaration of the `Runtime` type is done by the `construct_runtime!` macro in a real +/// runtime. struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} sp_api::decl_runtime_apis! 
{ pub trait Api { diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index 1515bd3a1208f..e9d550f3a3bcf 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -1,39 +1,39 @@ error[E0053]: method `test` has an incompatible type for trait - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:19:17 + --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:16:17 | -19 | fn test(data: &u64) { +16 | fn test(data: &u64) { | ^^^^ | | | expected `u64`, found `&u64` | help: change the parameter type to match the trait: `u64` | note: type in trait - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:13:17 + --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:10:17 | -13 | fn test(data: u64); +10 | fn test(data: u64); | ^^^ = note: expected signature `fn(u64)` found signature `fn(&u64)` error[E0308]: mismatched types - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:19:11 + --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:16:11 | -17 | / sp_api::impl_runtime_apis! { -18 | | impl self::Api for Runtime { -19 | | fn test(data: &u64) { +14 | / sp_api::impl_runtime_apis! { +15 | | impl self::Api for Runtime { +16 | | fn test(data: &u64) { | | ^^^^^^^ expected `u64`, found `&u64` -20 | | unimplemented!() +17 | | unimplemented!() ... 
| -34 | | } -35 | | } +31 | | } +32 | | } | |_- arguments to this function are incorrect | note: associated function defined here - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:13:6 + --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:10:6 | -13 | fn test(data: u64); +10 | fn test(data: u64); | ^^^^ help: consider removing the borrow | -19 | fn test(data: &u64) { +16 | fn test(data: &u64) { | diff --git a/primitives/application-crypto/Cargo.toml b/primitives/application-crypto/Cargo.toml index dc97948fdf5c8..f27ec881e1961 100644 --- a/primitives/application-crypto/Cargo.toml +++ b/primitives/application-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-application-crypto" -version = "7.0.0" +version = "23.0.0" authors = ["Parity Technologies "] edition = "2021" description = "Provides facilities for generating application specific crypto wrapper types." @@ -15,12 +15,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", default-features = false, optional = true, features = ["derive", "alloc"] } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } -sp-io = { version = "7.0.0", default-features = false, path = "../io" } +serde = { version = "1.0.163", default-features = false, optional = true, features = ["derive", "alloc"] } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } +sp-io = { version = "23.0.0", default-features = false, path = "../io" } [features] default = [ "std" ] @@ -51,3 +51,19 @@ 
full_crypto = [ "sp-io/disable_panic_handler", "sp-io/disable_oom", ] + +# This feature adds BLS crypto primitives. +# It should not be used in production since the implementation and interface may still +# be subject to significant changes. +bls-experimental = [ + "sp-core/bls-experimental", + "sp-io/bls-experimental", +] + +# This feature adds Bandersnatch crypto primitives. +# It should not be used in production since the implementation and interface may still +# be subject to significant changes. +bandersnatch-experimental = [ + "sp-core/bandersnatch-experimental", + "sp-io/bandersnatch-experimental", +] diff --git a/primitives/application-crypto/src/bandersnatch.rs b/primitives/application-crypto/src/bandersnatch.rs new file mode 100644 index 0000000000000..fc7383815d702 --- /dev/null +++ b/primitives/application-crypto/src/bandersnatch.rs @@ -0,0 +1,57 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Bandersnatch VRF application crypto types. 
+ +use crate::{KeyTypeId, RuntimePublic}; +pub use sp_core::bandersnatch::*; +use sp_std::vec::Vec; + +mod app { + crate::app_crypto!(super, sp_core::testing::BANDERSNATCH); +} + +#[cfg(feature = "full_crypto")] +pub use app::Pair as AppPair; +pub use app::{Public as AppPublic, Signature as AppSignature}; + +impl RuntimePublic for Public { + type Signature = Signature; + + /// Dummy implementation. Returns an empty vector. + fn all(_key_type: KeyTypeId) -> Vec { + Vec::new() + } + + fn generate_pair(key_type: KeyTypeId, seed: Option>) -> Self { + sp_io::crypto::bandersnatch_generate(key_type, seed) + } + + /// Dummy implementation. Returns `None`. + fn sign>(&self, _key_type: KeyTypeId, _msg: &M) -> Option { + None + } + + /// Dummy implementation. Returns `false`. + fn verify>(&self, _msg: &M, _signature: &Self::Signature) -> bool { + false + } + + fn to_raw_vec(&self) -> Vec { + sp_core::crypto::ByteArray::to_raw_vec(self) + } +} diff --git a/primitives/application-crypto/src/bls377.rs b/primitives/application-crypto/src/bls377.rs index 7fbbec5466058..ee17060564fa8 100644 --- a/primitives/application-crypto/src/bls377.rs +++ b/primitives/application-crypto/src/bls377.rs @@ -16,6 +16,7 @@ // limitations under the License. //! BLS12-377 crypto applications. +use crate::{KeyTypeId, RuntimePublic}; pub use sp_core::bls::bls377::*; @@ -26,3 +27,30 @@ mod app { #[cfg(feature = "full_crypto")] pub use app::Pair as AppPair; pub use app::{Public as AppPublic, Signature as AppSignature}; + +impl RuntimePublic for Public { + type Signature = Signature; + + /// Dummy implementation. Returns an empty vector. + fn all(_key_type: KeyTypeId) -> Vec { + Vec::new() + } + + fn generate_pair(key_type: KeyTypeId, seed: Option>) -> Self { + sp_io::crypto::bls377_generate(key_type, seed) + } + + /// Dummy implementation. Returns `None`. + fn sign>(&self, _key_type: KeyTypeId, _msg: &M) -> Option { + None + } + + /// Dummy implementation. Returns `false`. 
+ fn verify>(&self, _msg: &M, _signature: &Self::Signature) -> bool { + false + } + + fn to_raw_vec(&self) -> Vec { + sp_core::crypto::ByteArray::to_raw_vec(self) + } +} diff --git a/primitives/application-crypto/src/lib.rs b/primitives/application-crypto/src/lib.rs index 11be54e29c702..5384220bc9ca3 100644 --- a/primitives/application-crypto/src/lib.rs +++ b/primitives/application-crypto/src/lib.rs @@ -32,9 +32,6 @@ pub use sp_core::{ crypto::{ByteArray, CryptoType, Derive, IsWrappedBy, Public, UncheckedFrom, Wraps}, RuntimeDebug, }; -#[doc(hidden)] -#[cfg(all(not(feature = "std"), feature = "serde"))] -pub use sp_std::alloc::{format, string::String}; #[doc(hidden)] pub use codec; @@ -46,6 +43,8 @@ pub use serde; #[doc(hidden)] pub use sp_std::{ops::Deref, vec::Vec}; +#[cfg(feature = "bandersnatch-experimental")] +pub mod bandersnatch; #[cfg(feature = "bls-experimental")] pub mod bls377; #[cfg(feature = "bls-experimental")] @@ -182,6 +181,13 @@ macro_rules! app_crypto_pair { impl $crate::AppPair for Pair { type Generic = $pair; } + + impl Pair { + /// Convert into wrapped generic key pair type. + pub fn into_inner(self) -> $pair { + self.0 + } + } }; } @@ -328,6 +334,14 @@ macro_rules! app_crypto_public_common { }; } +#[doc(hidden)] +pub mod module_format_string_prelude { + #[cfg(all(not(feature = "std"), feature = "serde"))] + pub use sp_std::alloc::{format, string::String}; + #[cfg(feature = "std")] + pub use std::{format, string::String}; +} + /// Implements traits for the public key type if `feature = "serde"` is enabled. #[cfg(feature = "serde")] #[doc(hidden)] @@ -365,9 +379,7 @@ macro_rules! app_crypto_public_common_if_serde { where D: $crate::serde::Deserializer<'de>, { - use $crate::Ss58Codec; - #[cfg(all(not(feature = "std"), feature = "serde"))] - use $crate::{format, String}; + use $crate::{module_format_string_prelude::*, Ss58Codec}; Public::from_ss58check(&String::deserialize(deserializer)?) 
.map_err(|e| $crate::serde::de::Error::custom(format!("{:?}", e))) diff --git a/primitives/application-crypto/src/traits.rs b/primitives/application-crypto/src/traits.rs index 88d4bf36915d0..e9b1080f63d9c 100644 --- a/primitives/application-crypto/src/traits.rs +++ b/primitives/application-crypto/src/traits.rs @@ -23,7 +23,7 @@ use sp_core::crypto::Pair; use sp_core::crypto::{CryptoType, CryptoTypeId, IsWrappedBy, KeyTypeId, Public}; use sp_std::{fmt::Debug, vec::Vec}; -/// An application-specific cryptographic object. +/// Application-specific cryptographic object. /// /// Combines all the core types and constants that are defined by a particular /// cryptographic scheme when it is used in a specific application domain. @@ -31,7 +31,7 @@ use sp_std::{fmt::Debug, vec::Vec}; /// Typically, the implementers of this trait are its associated types themselves. /// This provides a convenient way to access generic information about the scheme /// given any of the associated types. -pub trait AppCrypto: 'static + Send + Sync + Sized + CryptoType + Clone { +pub trait AppCrypto: 'static + Sized + CryptoType { /// Identifier for application-specific key type. const ID: KeyTypeId; @@ -61,38 +61,30 @@ pub trait MaybeHash {} #[cfg(all(not(feature = "std"), not(feature = "full_crypto")))] impl MaybeHash for T {} -/// A application's public key. -pub trait AppPublic: - AppCrypto + Public + Ord + PartialOrd + Eq + PartialEq + Debug + MaybeHash + Codec -{ - /// The wrapped type which is just a plain instance of `Public`. - type Generic: IsWrappedBy - + Public - + Ord - + PartialOrd - + Eq - + PartialEq - + Debug - + MaybeHash - + Codec; -} - -/// A application's key pair. +/// Application-specific key pair. #[cfg(feature = "full_crypto")] -pub trait AppPair: AppCrypto + Pair::Public> { +pub trait AppPair: + AppCrypto + Pair::Public, Signature = ::Signature> +{ /// The wrapped type which is just a plain instance of `Pair`. 
type Generic: IsWrappedBy + Pair::Public as AppPublic>::Generic> + Pair::Signature as AppSignature>::Generic>; } -/// A application's signature. -pub trait AppSignature: AppCrypto + Eq + PartialEq + Debug { +/// Application-specific public key. +pub trait AppPublic: AppCrypto + Public + Debug + MaybeHash + Codec { + /// The wrapped type which is just a plain instance of `Public`. + type Generic: IsWrappedBy + Public + Debug + MaybeHash + Codec; +} + +/// Application-specific signature. +pub trait AppSignature: AppCrypto + Eq + PartialEq + Debug + Clone { /// The wrapped type which is just a plain instance of `Signature`. type Generic: IsWrappedBy + Eq + PartialEq + Debug; } -/// A runtime interface for a public key. +/// Runtime interface for a public key. pub trait RuntimePublic: Sized { /// The signature that will be generated when signing with the corresponding private key. type Signature: Debug + Eq + PartialEq + Clone; @@ -123,7 +115,7 @@ pub trait RuntimePublic: Sized { fn to_raw_vec(&self) -> Vec; } -/// A runtime interface for an application's public key. +/// Runtime interface for an application's public key. pub trait RuntimeAppPublic: Sized { /// An identifier for this application-specific key type. 
const ID: KeyTypeId; diff --git a/primitives/application-crypto/test/Cargo.toml b/primitives/application-crypto/test/Cargo.toml index b10b7a3218ba6..d4b2ccca909b5 100644 --- a/primitives/application-crypto/test/Cargo.toml +++ b/primitives/application-crypto/test/Cargo.toml @@ -14,8 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-api = { version = "4.0.0-dev", path = "../../api" } -sp-application-crypto = { version = "7.0.0", path = "../" } -sp-core = { version = "7.0.0", default-features = false, path = "../../core" } -sp-keystore = { version = "0.13.0", default-features = false, path = "../../keystore" } -sp-runtime = { version = "7.0.0", path = "../../runtime" } +sp-application-crypto = { version = "23.0.0", path = "../" } +sp-core = { version = "21.0.0", default-features = false, path = "../../core" } +sp-keystore = { version = "0.27.0", default-features = false, path = "../../keystore" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/primitives/application-crypto/test/src/ecdsa.rs b/primitives/application-crypto/test/src/ecdsa.rs index 99ca6f4c4adf2..396683a91ac02 100644 --- a/primitives/application-crypto/test/src/ecdsa.rs +++ b/primitives/application-crypto/test/src/ecdsa.rs @@ -16,13 +16,13 @@ // limitations under the License. //! 
Integration tests for ecdsa -use sp_api::ProvideRuntimeApi; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_application_crypto::ecdsa::AppPair; use sp_core::{ crypto::{ByteArray, Pair}, testing::ECDSA, }; -use sp_keystore::{testing::MemoryKeystore, Keystore}; +use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; use std::sync::Arc; use substrate_test_runtime_client::{ runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, @@ -31,9 +31,12 @@ use substrate_test_runtime_client::{ #[test] fn ecdsa_works_in_runtime() { let keystore = Arc::new(MemoryKeystore::new()); - let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); - let (signature, public) = test_client - .runtime_api() + let test_client = TestClientBuilder::new().build(); + + let mut runtime_api = test_client.runtime_api(); + runtime_api.register_extension(KeystoreExt::new(keystore.clone())); + + let (signature, public) = runtime_api .test_ecdsa_crypto(test_client.chain_info().genesis_hash) .expect("Tests `ecdsa` crypto."); diff --git a/primitives/application-crypto/test/src/ed25519.rs b/primitives/application-crypto/test/src/ed25519.rs index f4553f95bf1f8..f0ceccdcebfcd 100644 --- a/primitives/application-crypto/test/src/ed25519.rs +++ b/primitives/application-crypto/test/src/ed25519.rs @@ -17,13 +17,13 @@ //! 
Integration tests for ed25519 -use sp_api::ProvideRuntimeApi; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_application_crypto::ed25519::AppPair; use sp_core::{ crypto::{ByteArray, Pair}, testing::ED25519, }; -use sp_keystore::{testing::MemoryKeystore, Keystore}; +use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; use std::sync::Arc; use substrate_test_runtime_client::{ runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, @@ -32,9 +32,12 @@ use substrate_test_runtime_client::{ #[test] fn ed25519_works_in_runtime() { let keystore = Arc::new(MemoryKeystore::new()); - let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); - let (signature, public) = test_client - .runtime_api() + let test_client = TestClientBuilder::new().build(); + + let mut runtime_api = test_client.runtime_api(); + runtime_api.register_extension(KeystoreExt::new(keystore.clone())); + + let (signature, public) = runtime_api .test_ed25519_crypto(test_client.chain_info().genesis_hash) .expect("Tests `ed25519` crypto."); diff --git a/primitives/application-crypto/test/src/sr25519.rs b/primitives/application-crypto/test/src/sr25519.rs index 736521d7d9f3a..3c62270395f04 100644 --- a/primitives/application-crypto/test/src/sr25519.rs +++ b/primitives/application-crypto/test/src/sr25519.rs @@ -17,13 +17,13 @@ //! 
Integration tests for sr25519 -use sp_api::ProvideRuntimeApi; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_application_crypto::sr25519::AppPair; use sp_core::{ crypto::{ByteArray, Pair}, testing::SR25519, }; -use sp_keystore::{testing::MemoryKeystore, Keystore}; +use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; use std::sync::Arc; use substrate_test_runtime_client::{ runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, @@ -32,9 +32,12 @@ use substrate_test_runtime_client::{ #[test] fn sr25519_works_in_runtime() { let keystore = Arc::new(MemoryKeystore::new()); - let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); - let (signature, public) = test_client - .runtime_api() + let test_client = TestClientBuilder::new().build(); + + let mut runtime_api = test_client.runtime_api(); + runtime_api.register_extension(KeystoreExt::new(keystore.clone())); + + let (signature, public) = runtime_api .test_sr25519_crypto(test_client.chain_info().genesis_hash) .expect("Tests `sr25519` crypto."); diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index 122691623b8fd..972dfee29d12b 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-arithmetic" -version = "6.0.0" +version = "16.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -14,21 +14,21 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len", ] } integer-sqrt = "0.1.2" num-traits = { version = "0.2.8", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", default-features = 
false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.163", default-features = false, features = ["derive", "alloc"], optional = true } static_assertions = "1.1.0" -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } [dev-dependencies] criterion = "0.4.0" primitive-types = "0.12.0" -sp-core = { version = "7.0.0", features = ["full_crypto"], path = "../core" } +sp-core = { version = "21.0.0", features = ["full_crypto"], path = "../core" } rand = "0.8.5" [features] @@ -39,6 +39,7 @@ std = [ "scale-info/std", "serde/std", "sp-std/std", + "sp-core/std" ] # Serde support without relying on std features. serde = [ diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index 99dbdf7487320..976798831797d 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -18,9 +18,7 @@ arbitrary = "1.3.0" fraction = "0.13.1" honggfuzz = "0.5.49" num-bigint = "0.4.3" -num-traits = "0.2.15" -primitive-types = "0.12.0" -sp-arithmetic = { version = "6.0.0", path = ".." } +sp-arithmetic = { version = "16.0.0", path = ".." 
} [[bin]] name = "biguint" diff --git a/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs b/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs index e9a3208a738ef..5f3f675c971f2 100644 --- a/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs +++ b/primitives/arithmetic/fuzzer/src/multiply_by_rational_with_rounding.rs @@ -52,9 +52,7 @@ fn check(f: N, n: N, d: N, r: Rounding) where N: MultiplyRational + Into + Copy + core::fmt::Debug, { - let Some(got) = f.multiply_rational(n, d, r) else { - return; - }; + let Some(got) = f.multiply_rational(n, d, r) else { return }; let (ae, be, ce) = (Fraction::from(f.into()), Fraction::from(n.into()), Fraction::from(d.into())); diff --git a/primitives/arithmetic/src/fixed_point.rs b/primitives/arithmetic/src/fixed_point.rs index 08b788e9abd76..d3e75f6f781cd 100644 --- a/primitives/arithmetic/src/fixed_point.rs +++ b/primitives/arithmetic/src/fixed_point.rs @@ -52,16 +52,18 @@ pub trait FixedPointOperand: { } -impl FixedPointOperand for i128 {} -impl FixedPointOperand for u128 {} -impl FixedPointOperand for i64 {} -impl FixedPointOperand for u64 {} -impl FixedPointOperand for i32 {} -impl FixedPointOperand for u32 {} -impl FixedPointOperand for i16 {} -impl FixedPointOperand for u16 {} -impl FixedPointOperand for i8 {} -impl FixedPointOperand for u8 {} +impl FixedPointOperand for T where + T: Copy + + Clone + + Bounded + + Zero + + Saturating + + PartialOrd + + UniqueSaturatedInto + + TryFrom + + CheckedNeg +{ +} /// Something that implements a decimal fixed point number. 
/// diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index d2eceafab5ed0..900f0b75c3bf4 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -40,7 +40,9 @@ pub mod per_things; pub mod rational; pub mod traits; -pub use fixed_point::{FixedI128, FixedI64, FixedPointNumber, FixedPointOperand, FixedU128}; +pub use fixed_point::{ + FixedI128, FixedI64, FixedPointNumber, FixedPointOperand, FixedU128, FixedU64, +}; pub use per_things::{ InnerOf, MultiplyArg, PerThing, PerU16, Perbill, Percent, Permill, Perquintill, RationalArg, ReciprocalArg, Rounding, SignedRounding, UpperOf, diff --git a/primitives/arithmetic/src/traits.rs b/primitives/arithmetic/src/traits.rs index 061b11b3e9c72..6fcc8248539ca 100644 --- a/primitives/arithmetic/src/traits.rs +++ b/primitives/arithmetic/src/traits.rs @@ -59,6 +59,7 @@ pub trait BaseArithmetic: + CheckedMul + CheckedDiv + CheckedRem + + CheckedNeg + Ensure + Saturating + PartialOrd @@ -116,6 +117,7 @@ impl< + CheckedMul + CheckedDiv + CheckedRem + + CheckedNeg + Ensure + Saturating + PartialOrd diff --git a/primitives/authority-discovery/Cargo.toml b/primitives/authority-discovery/Cargo.toml index e3a82b5fda514..03c9ef2c7d26b 100644 --- a/primitives/authority-discovery/Cargo.toml +++ b/primitives/authority-discovery/Cargo.toml @@ -13,12 +13,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../application-crypto" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "5.0.0", 
default-features = false, path = "../std" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../application-crypto" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } [features] default = ["std"] @@ -30,3 +30,8 @@ std = [ "sp-runtime/std", "sp-std/std", ] +serde = [ + "scale-info/serde", + "sp-application-crypto/serde", + "sp-runtime/serde", +] diff --git a/primitives/block-builder/Cargo.toml b/primitives/block-builder/Cargo.toml index 6ccb7980df36e..ba0e1f041e0da 100644 --- a/primitives/block-builder/Cargo.toml +++ b/primitives/block-builder/Cargo.toml @@ -13,16 +13,14 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../inherents" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } [features] default = [ "std" ] std = [ - "codec/std", "sp-api/std", "sp-inherents/std", "sp-runtime/std", diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 68791852d8458..6320fab9390b9 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -14,14 +14,14 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } 
futures = "0.3.21" log = "0.4.17" -lru = "0.8.1" parking_lot = "0.12.1" +schnellru = "0.2.1" thiserror = "1.0.30" sp-api = { version = "4.0.0-dev", path = "../api" } sp-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sp-database = { version = "4.0.0-dev", path = "../database" } -sp-runtime = { version = "7.0.0", path = "../runtime" } -sp-state-machine = { version = "0.13.0", path = "../state-machine" } +sp-runtime = { version = "24.0.0", path = "../runtime" } +sp-state-machine = { version = "0.28.0", path = "../state-machine" } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index e9278be1d5d3c..8208f9128e714 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -196,9 +196,7 @@ pub trait Backend: base_hash: Block::Hash, import_lock: &RwLock<()>, ) -> Result> { - let Some(base_header) = self.header(base_hash)? else { - return Ok(None) - }; + let Some(base_header) = self.header(base_hash)? else { return Ok(None) }; let leaves = { // ensure no blocks are imported during this code block. diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index d7f7086388e7f..74a2ed3fba50d 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -150,7 +150,7 @@ pub enum Error { #[error("Transaction pool not ready for block production.")] TransactionPoolNotReady, - #[error("Database")] + #[error("Database error: {0}")] DatabaseError(#[from] sp_database::error::DatabaseError), #[error("Failed to get header for hash {0}")] diff --git a/primitives/blockchain/src/header_metadata.rs b/primitives/blockchain/src/header_metadata.rs index 1d406dd0f4ed4..08b3c9ab3dfbd 100644 --- a/primitives/blockchain/src/header_metadata.rs +++ b/primitives/blockchain/src/header_metadata.rs @@ -18,13 +18,12 @@ //! Implements tree backend, cached header metadata and algorithms //! to compute routes efficiently over the tree of headers. 
-use lru::LruCache; use parking_lot::RwLock; +use schnellru::{ByLength, LruMap}; use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One}; -use std::num::NonZeroUsize; /// Set to the expected max difference between `best` and `finalized` blocks at sync. -const LRU_CACHE_SIZE: usize = 5_000; +const LRU_CACHE_SIZE: u32 = 5_000; /// Get lowest common ancestor between two blocks in the tree. /// @@ -243,20 +242,19 @@ pub trait HeaderMetadata { /// Caches header metadata in an in-memory LRU cache. pub struct HeaderMetadataCache { - cache: RwLock>>, + cache: RwLock>>, } impl HeaderMetadataCache { /// Creates a new LRU header metadata cache with `capacity`. - pub fn new(capacity: NonZeroUsize) -> Self { - HeaderMetadataCache { cache: RwLock::new(LruCache::new(capacity)) } + pub fn new(capacity: u32) -> Self { + HeaderMetadataCache { cache: RwLock::new(LruMap::new(ByLength::new(capacity))) } } } impl Default for HeaderMetadataCache { fn default() -> Self { - let cap = NonZeroUsize::new(LRU_CACHE_SIZE).expect("cache capacity is not zero"); - HeaderMetadataCache { cache: RwLock::new(LruCache::new(cap)) } + HeaderMetadataCache { cache: RwLock::new(LruMap::new(ByLength::new(LRU_CACHE_SIZE))) } } } @@ -266,11 +264,11 @@ impl HeaderMetadataCache { } pub fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { - self.cache.write().put(hash, metadata); + self.cache.write().insert(hash, metadata); } pub fn remove_header_metadata(&self, hash: Block::Hash) { - self.cache.write().pop(&hash); + self.cache.write().remove(&hash); } } diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index 57689aaa13aa6..b50e43acfd653 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -14,15 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.57", optional = true } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features 
= false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../application-crypto" } -sp-consensus = { version = "0.10.0-dev", optional = true, path = "../common" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../application-crypto" } sp-consensus-slots = { version = "0.10.0-dev", default-features = false, path = "../slots" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../inherents" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../std" } sp-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../timestamp" } [features] @@ -33,7 +32,6 @@ std = [ "scale-info/std", "sp-api/std", "sp-application-crypto/std", - "sp-consensus", "sp-consensus-slots/std", "sp-inherents/std", "sp-runtime/std", diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 5e57f276aac8b..3e22c89a29d64 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -14,18 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.57", optional = true } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", 
default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.163", default-features = false, features = ["derive", "alloc"], optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../application-crypto" } -sp-consensus = { version = "0.10.0-dev", optional = true, path = "../common" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../application-crypto" } sp-consensus-slots = { version = "0.10.0-dev", default-features = false, path = "../slots" } -sp-core = { version = "7.0.0", default-features = false, path = "../../core" } +sp-core = { version = "21.0.0", default-features = false, path = "../../core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../inherents" } -sp-keystore = { version = "0.13.0", default-features = false, optional = true, path = "../../keystore" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../std" } sp-timestamp = { version = "4.0.0-dev", optional = true, path = "../../timestamp" } [features] @@ -37,14 +35,12 @@ std = [ "serde/std", "sp-api/std", "sp-application-crypto/std", - "sp-consensus", "sp-consensus-slots/std", "sp-core/std", "sp-inherents/std", - "sp-keystore", "sp-runtime/std", "sp-std/std", - "sp-timestamp", + "sp-timestamp/std", ] # Serde support without relying on std features. 
diff --git a/primitives/consensus/beefy/Cargo.toml b/primitives/consensus/beefy/Cargo.toml index cf5f660b22007..e6445919e2a1a 100644 --- a/primitives/consensus/beefy/Cargo.toml +++ b/primitives/consensus/beefy/Cargo.toml @@ -12,22 +12,22 @@ description = "Primitives for BEEFY protocol." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", default-features = false, optional = true, features = ["derive", "alloc"] } +serde = { version = "1.0.163", default-features = false, optional = true, features = ["derive", "alloc"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../application-crypto" } -sp-core = { version = "7.0.0", default-features = false, path = "../../core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../io" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../application-crypto" } +sp-core = { version = "21.0.0", default-features = false, path = "../../core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../io" } sp-mmr-primitives = { version = "4.0.0-dev", default-features = false, path = "../../merkle-mountain-range" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../std" } strum = { version = "0.24.1", features = ["derive"], default-features = false } 
lazy_static = "1.4.0" [dev-dependencies] -array-bytes = "4.1" -sp-keystore = { version = "0.13.0", path = "../../keystore" } +array-bytes = "6.1" +w3f-bls = { version = "0.1.3", features = ["std"]} [features] default = ["std"] @@ -52,3 +52,10 @@ serde = [ "sp-core/serde", "sp-runtime/serde", ] + +# This feature adds BLS crypto primitives. It should not be used in production since +# the BLS implementation and interface may still be subject to significant change. +bls-experimental = [ + "sp-core/bls-experimental", + "sp-application-crypto/bls-experimental", +] diff --git a/primitives/consensus/beefy/src/commitment.rs b/primitives/consensus/beefy/src/commitment.rs index 8ad3cc3721a00..5b6ef9ae5ab36 100644 --- a/primitives/consensus/beefy/src/commitment.rs +++ b/primitives/consensus/beefy/src/commitment.rs @@ -249,30 +249,60 @@ impl From> for VersionedFinalityProof { #[cfg(test)] mod tests { + use super::*; - use crate::{crypto, known_payloads, KEY_TYPE}; + use crate::{ecdsa_crypto::Signature as EcdsaSignature, known_payloads}; use codec::Decode; use sp_core::{keccak_256, Pair}; - use sp_keystore::{testing::MemoryKeystore, KeystorePtr}; + + #[cfg(feature = "bls-experimental")] + use crate::bls_crypto::Signature as BlsSignature; type TestCommitment = Commitment; - type TestSignedCommitment = SignedCommitment; - type TestVersionedFinalityProof = VersionedFinalityProof; const LARGE_RAW_COMMITMENT: &[u8] = include_bytes!("../test-res/large-raw-commitment"); - // The mock signatures are equivalent to the ones produced by the BEEFY keystore - fn mock_signatures() -> (crypto::Signature, crypto::Signature) { - let store: KeystorePtr = MemoryKeystore::new().into(); + // Types for bls-less commitment + type TestEcdsaSignedCommitment = SignedCommitment; + type TestVersionedFinalityProof = VersionedFinalityProof; + + // Types for commitment supporting aggregatable bls signature + #[cfg(feature = "bls-experimental")] + #[derive(Clone, Debug, PartialEq, codec::Encode, 
codec::Decode)] + struct BlsAggregatableSignature(BlsSignature); + + #[cfg(feature = "bls-experimental")] + #[derive(Clone, Debug, PartialEq, codec::Encode, codec::Decode)] + struct EcdsaBlsSignaturePair(EcdsaSignature, BlsSignature); + + #[cfg(feature = "bls-experimental")] + type TestBlsSignedCommitment = SignedCommitment; + // Generates mock aggregatable ecdsa signature for generating test commitment + // BLS signatures + fn mock_ecdsa_signatures() -> (EcdsaSignature, EcdsaSignature) { let alice = sp_core::ecdsa::Pair::from_string("//Alice", None).unwrap(); - store.insert(KEY_TYPE, "//Alice", alice.public().as_ref()).unwrap(); let msg = keccak_256(b"This is the first message"); - let sig1 = store.ecdsa_sign_prehashed(KEY_TYPE, &alice.public(), &msg).unwrap().unwrap(); + let sig1 = alice.sign_prehashed(&msg); let msg = keccak_256(b"This is the second message"); - let sig2 = store.ecdsa_sign_prehashed(KEY_TYPE, &alice.public(), &msg).unwrap().unwrap(); + let sig2 = alice.sign_prehashed(&msg); + + (sig1.into(), sig2.into()) + } + + // Generates mock aggregatable bls signature for generating test commitment + // BLS signatures + #[cfg(feature = "bls-experimental")] + fn mock_bls_signatures() -> (BlsSignature, BlsSignature) { + let alice = sp_core::bls::Pair::from_string("//Alice", None).unwrap(); + + let msg = b"This is the first message"; + let sig1 = alice.sign(msg); + + let msg = b"This is the second message"; + let sig2 = alice.sign(msg); (sig1.into(), sig2.into()) } @@ -300,26 +330,26 @@ mod tests { } #[test] - fn signed_commitment_encode_decode() { + fn signed_commitment_encode_decode_ecdsa() { // given let payload = Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; - let sigs = mock_signatures(); + let ecdsa_sigs = mock_ecdsa_signatures(); - let signed = SignedCommitment { - commitment, - signatures: vec![None, None, Some(sigs.0), 
Some(sigs.1)], + let ecdsa_signed = SignedCommitment { + commitment: commitment.clone(), + signatures: vec![None, None, Some(ecdsa_sigs.0.clone()), Some(ecdsa_sigs.1.clone())], }; // when - let encoded = codec::Encode::encode(&signed); - let decoded = TestSignedCommitment::decode(&mut &*encoded); + let encoded = codec::Encode::encode(&ecdsa_signed); + let decoded = TestEcdsaSignedCommitment::decode(&mut &*encoded); // then - assert_eq!(decoded, Ok(signed)); + assert_eq!(decoded, Ok(ecdsa_signed)); assert_eq!( encoded, array_bytes::hex2bytes_unchecked( @@ -334,6 +364,44 @@ mod tests { ); } + #[test] + #[cfg(feature = "bls-experimental")] + fn signed_commitment_encode_decode_ecdsa_n_bls() { + // given + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); + let commitment: TestCommitment = + Commitment { payload, block_number: 5, validator_set_id: 0 }; + + let ecdsa_sigs = mock_ecdsa_signatures(); + + //including bls signature + let bls_signed_msgs = mock_bls_signatures(); + + let ecdsa_and_bls_signed = SignedCommitment { + commitment, + signatures: vec![ + None, + None, + Some(EcdsaBlsSignaturePair(ecdsa_sigs.0, bls_signed_msgs.0)), + Some(EcdsaBlsSignaturePair(ecdsa_sigs.1, bls_signed_msgs.1)), + ], + }; + + //when + let encoded = codec::Encode::encode(&ecdsa_and_bls_signed); + let decoded = TestBlsSignedCommitment::decode(&mut &*encoded); + + // then + assert_eq!(decoded, Ok(ecdsa_and_bls_signed)); + assert_eq!( + encoded, + array_bytes::hex2bytes_unchecked( + 
"046d68343048656c6c6f20576f726c642105000000000000000000000000000000000000000000000004300400000008558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01667603fc041cf9d7147d22bf54b15e5778893d6986b71a929747befd3b4d233fbe668bc480e8865116b94db46ca25a01e03c71955f2582604e415da68f2c3c406b9d5f4ad416230ec5453f05ac16a50d8d0923dfb0413cc956ae3fa6334465bd1f2cacec8e9cd606438390fe2a29dc052d6e1f8105c337a86cdd9aaacdc496577f3db8c55ef9e6fd48f2c5c05a2274707491635d8ba3df64f324575b7b2a34487bca2324b6a0046395a71681be3d0c2a00df61d3b2be0963eb6caa243cc505d327aec73e1bb7ffe9a14b1354b0c406792ac6d6f47c06987c15dec9993f43eefa001d866fe0850d986702c414840f0d9ec0fdc04832ef91ae37c8d49e2f573ca50cb37f152801d489a19395cb04e5fc8f2ab6954b58a3bcc40ef9b6409d2ff7ef07" + ) + ); + } + #[test] fn signed_commitment_count_signatures() { // given @@ -342,7 +410,7 @@ mod tests { let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; - let sigs = mock_signatures(); + let sigs = mock_ecdsa_signatures(); let mut signed = SignedCommitment { commitment, @@ -389,7 +457,7 @@ mod tests { let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; - let sigs = mock_signatures(); + let sigs = mock_ecdsa_signatures(); let signed = SignedCommitment { commitment, @@ -416,7 +484,7 @@ mod tests { let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; - let sigs = mock_signatures(); + let sigs = mock_ecdsa_signatures(); let signatures: Vec> = (0..1024) .into_iter() @@ -426,7 +494,7 @@ mod tests { // when let encoded = codec::Encode::encode(&signed); - let decoded = TestSignedCommitment::decode(&mut &*encoded); + let decoded = TestEcdsaSignedCommitment::decode(&mut &*encoded); // then assert_eq!(decoded, Ok(signed)); diff --git a/primitives/consensus/beefy/src/lib.rs b/primitives/consensus/beefy/src/lib.rs index 268e1925b4449..c69e26bf574d8 100644 
--- a/primitives/consensus/beefy/src/lib.rs +++ b/primitives/consensus/beefy/src/lib.rs @@ -47,11 +47,11 @@ use codec::{Codec, Decode, Encode}; use scale_info::TypeInfo; use sp_application_crypto::RuntimeAppPublic; use sp_core::H256; -use sp_runtime::traits::{Hash, NumberFor}; +use sp_runtime::traits::{Hash, Keccak256, NumberFor}; use sp_std::prelude::*; /// Key type for BEEFY module. -pub const KEY_TYPE: sp_application_crypto::KeyTypeId = sp_application_crypto::KeyTypeId(*b"beef"); +pub const KEY_TYPE: sp_core::crypto::KeyTypeId = sp_application_crypto::key_types::BEEFY; /// Trait representing BEEFY authority id, including custom signature verification. /// @@ -63,23 +63,21 @@ pub trait BeefyAuthorityId: RuntimeAppPublic { fn verify(&self, signature: &::Signature, msg: &[u8]) -> bool; } -/// BEEFY cryptographic types +/// BEEFY cryptographic types for ECDSA crypto /// -/// This module basically introduces three crypto types: -/// - `crypto::Pair` -/// - `crypto::Public` -/// - `crypto::Signature` +/// This module basically introduces four crypto types: +/// - `ecdsa_crypto::Pair` +/// - `ecdsa_crypto::Public` +/// - `ecdsa_crypto::Signature` +/// - `ecdsa_crypto::AuthorityId` /// /// Your code should use the above types as concrete types for all crypto related /// functionality. -/// -/// The current underlying crypto scheme used is ECDSA. This can be changed, -/// without affecting code restricted against the above listed crypto types. -pub mod crypto { - use super::{BeefyAuthorityId, Hash, RuntimeAppPublic}; +pub mod ecdsa_crypto { + use super::{BeefyAuthorityId, Hash, RuntimeAppPublic, KEY_TYPE as BEEFY_KEY_TYPE}; use sp_application_crypto::{app_crypto, ecdsa}; use sp_core::crypto::Wraps; - app_crypto!(ecdsa, crate::KEY_TYPE); + app_crypto!(ecdsa, BEEFY_KEY_TYPE); /// Identity of a BEEFY authority using ECDSA as its crypto. 
pub type AuthorityId = Public; @@ -104,6 +102,44 @@ pub mod crypto { } } +/// BEEFY cryptographic types for BLS crypto +/// +/// This module basically introduces four crypto types: +/// - `bls_crypto::Pair` +/// - `bls_crypto::Public` +/// - `bls_crypto::Signature` +/// - `bls_crypto::AuthorityId` +/// +/// Your code should use the above types as concrete types for all crypto related +/// functionality. + +#[cfg(feature = "bls-experimental")] +pub mod bls_crypto { + use super::{BeefyAuthorityId, Hash, RuntimeAppPublic, KEY_TYPE as BEEFY_KEY_TYPE}; + use sp_application_crypto::{app_crypto, bls377}; + use sp_core::{bls377::Pair as BlsPair, crypto::Wraps, Pair as _}; + app_crypto!(bls377, BEEFY_KEY_TYPE); + + /// Identity of a BEEFY authority using BLS as its crypto. + pub type AuthorityId = Public; + + /// Signature for a BEEFY authority using BLS as its crypto. + pub type AuthoritySignature = Signature; + + impl BeefyAuthorityId for AuthorityId + where + ::Output: Into<[u8; 32]>, + { + fn verify(&self, signature: &::Signature, msg: &[u8]) -> bool { + // `w3f-bls` library uses IETF hashing standard and as such does not exposes + // a choice of hash to field function. + // We are directly calling into the library to avoid introducing new host call. + // and because BeefyAuthorityId::verify is being called in the runtime so we don't have + + BlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref()) + } + } +} /// The `ConsensusEngineId` of BEEFY. pub const BEEFY_ENGINE_ID: sp_runtime::ConsensusEngineId = *b"BEEF"; @@ -156,6 +192,8 @@ impl ValidatorSet { /// The index of an authority. pub type AuthorityIndex = u32; +/// The Hashing used within MMR. +pub type MmrHashing = Keccak256; /// The type used to represent an MMR root hash. pub type MmrRootHash = H256; @@ -302,14 +340,15 @@ impl OpaqueKeyOwnershipProof { sp_api::decl_runtime_apis! { /// API necessary for BEEFY voters. 
- #[api_version(2)] - pub trait BeefyApi + #[api_version(3)] + pub trait BeefyApi where + AuthorityId : Codec + RuntimeAppPublic, { /// Return the block number where BEEFY consensus is enabled/started fn beefy_genesis() -> Option>; /// Return the current active BEEFY validator set - fn validator_set() -> Option>; + fn validator_set() -> Option>; /// Submits an unsigned extrinsic to report an equivocation. The caller /// must provide the equivocation proof and a key ownership proof @@ -321,7 +360,7 @@ sp_api::decl_runtime_apis! { /// hardcoded to return `None`). Only useful in an offchain context. fn submit_report_equivocation_unsigned_extrinsic( equivocation_proof: - EquivocationProof, crypto::AuthorityId, crypto::Signature>, + EquivocationProof, AuthorityId, ::Signature>, key_owner_proof: OpaqueKeyOwnershipProof, ) -> Option<()>; @@ -338,9 +377,10 @@ sp_api::decl_runtime_apis! { /// older states to be available. fn generate_key_ownership_proof( set_id: ValidatorSetId, - authority_id: crypto::AuthorityId, + authority_id: AuthorityId, ) -> Option; } + } #[cfg(test)] @@ -364,14 +404,14 @@ mod tests { } #[test] - fn beefy_verify_works() { + fn ecdsa_beefy_verify_works() { let msg = &b"test-message"[..]; - let (pair, _) = crypto::Pair::generate(); + let (pair, _) = ecdsa_crypto::Pair::generate(); - let keccak_256_signature: crypto::Signature = + let keccak_256_signature: ecdsa_crypto::Signature = pair.as_inner_ref().sign_prehashed(&keccak_256(msg)).into(); - let blake2_256_signature: crypto::Signature = + let blake2_256_signature: ecdsa_crypto::Signature = pair.as_inner_ref().sign_prehashed(&blake2_256(msg)).into(); // Verification works if same hashing function is used when signing and verifying. 
@@ -390,7 +430,7 @@ mod tests { )); // Other public key doesn't work - let (other_pair, _) = crypto::Pair::generate(); + let (other_pair, _) = ecdsa_crypto::Pair::generate(); assert!(!BeefyAuthorityId::::verify( &other_pair.public(), &keccak_256_signature, @@ -402,4 +442,20 @@ mod tests { msg, )); } + + #[test] + #[cfg(feature = "bls-experimental")] + fn bls_beefy_verify_works() { + let msg = &b"test-message"[..]; + let (pair, _) = bls_crypto::Pair::generate(); + + let signature: bls_crypto::Signature = pair.as_inner_ref().sign(&msg).into(); + + // Verification works if same hashing function is used when signing and verifying. + assert!(BeefyAuthorityId::::verify(&pair.public(), &signature, msg)); + + // Other public key doesn't work + let (other_pair, _) = bls_crypto::Pair::generate(); + assert!(!BeefyAuthorityId::::verify(&other_pair.public(), &signature, msg,)); + } } diff --git a/primitives/consensus/beefy/src/mmr.rs b/primitives/consensus/beefy/src/mmr.rs index c303cae2fdcc7..991dc07c5a7f3 100644 --- a/primitives/consensus/beefy/src/mmr.rs +++ b/primitives/consensus/beefy/src/mmr.rs @@ -26,7 +26,7 @@ //! but we imagine they will be useful for other chains that either want to bridge with Polkadot //! or are completely standalone, but heavily inspired by Polkadot. -use crate::{crypto::AuthorityId, ConsensusLog, MmrRootHash, Vec, BEEFY_ENGINE_ID}; +use crate::{ecdsa_crypto::AuthorityId, ConsensusLog, MmrRootHash, Vec, BEEFY_ENGINE_ID}; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime::{ @@ -102,7 +102,7 @@ impl MmrLeafVersion { /// Details of a BEEFY authority set. #[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct BeefyAuthoritySet { +pub struct BeefyAuthoritySet { /// Id of the set. /// /// Id is required to correlate BEEFY signed commitments with the validator set. 
@@ -115,12 +115,19 @@ pub struct BeefyAuthoritySet { /// of signatures. We put set length here, so that these clients can verify the minimal /// number of required signatures. pub len: u32, - /// Merkle Root Hash built from BEEFY AuthorityIds. + + /// Commitment(s) to BEEFY AuthorityIds. /// /// This is used by Light Clients to confirm that the commitments are signed by the correct /// validator set. Light Clients using interactive protocol, might verify only subset of /// signatures, hence don't require the full list here (will receive inclusion proofs). - pub root: MerkleRoot, + /// + /// This could be Merkle Root Hash built from BEEFY ECDSA public keys and/or + /// polynomial commitment to the polynomial interpolating BLS public keys + /// which is used by APK proof based light clients to verify the validity + /// of aggregated BLS keys using APK proofs. + /// Multiple commitments can be tupled together. + pub keyset_commitment: AuthoritySetCommitment, } /// Details of the next BEEFY authority set. diff --git a/primitives/consensus/beefy/src/test_utils.rs b/primitives/consensus/beefy/src/test_utils.rs index 9e0758cdeb150..b83f657af38e3 100644 --- a/primitives/consensus/beefy/src/test_utils.rs +++ b/primitives/consensus/beefy/src/test_utils.rs @@ -17,13 +17,13 @@ #![cfg(feature = "std")] -use crate::{crypto, Commitment, EquivocationProof, Payload, ValidatorSetId, VoteMessage}; +use crate::{ecdsa_crypto, Commitment, EquivocationProof, Payload, ValidatorSetId, VoteMessage}; use codec::Encode; use sp_core::{ecdsa, keccak_256, Pair}; use std::collections::HashMap; use strum::IntoEnumIterator; -/// Set of test accounts using [`crate::crypto`] types. +/// Set of test accounts using [`crate::ecdsa_crypto`] types. #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] pub enum Keyring { @@ -39,19 +39,19 @@ pub enum Keyring { impl Keyring { /// Sign `msg`. 
- pub fn sign(self, msg: &[u8]) -> crypto::Signature { + pub fn sign(self, msg: &[u8]) -> ecdsa_crypto::Signature { // todo: use custom signature hashing type let msg = keccak_256(msg); ecdsa::Pair::from(self).sign_prehashed(&msg).into() } /// Return key pair. - pub fn pair(self) -> crypto::Pair { + pub fn pair(self) -> ecdsa_crypto::Pair { ecdsa::Pair::from_string(self.to_seed().as_str(), None).unwrap().into() } /// Return public key. - pub fn public(self) -> crypto::Public { + pub fn public(self) -> ecdsa_crypto::Public { self.pair().public() } @@ -61,19 +61,19 @@ impl Keyring { } /// Get Keyring from public key. - pub fn from_public(who: &crypto::Public) -> Option { - Self::iter().find(|&k| &crypto::Public::from(k) == who) + pub fn from_public(who: &ecdsa_crypto::Public) -> Option { + Self::iter().find(|&k| &ecdsa_crypto::Public::from(k) == who) } } lazy_static::lazy_static! { - static ref PRIVATE_KEYS: HashMap = + static ref PRIVATE_KEYS: HashMap = Keyring::iter().map(|i| (i, i.pair())).collect(); - static ref PUBLIC_KEYS: HashMap = + static ref PUBLIC_KEYS: HashMap = PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect(); } -impl From for crypto::Pair { +impl From for ecdsa_crypto::Pair { fn from(k: Keyring) -> Self { k.pair() } @@ -85,7 +85,7 @@ impl From for ecdsa::Pair { } } -impl From for crypto::Public { +impl From for ecdsa_crypto::Public { fn from(k: Keyring) -> Self { (*PUBLIC_KEYS).get(&k).cloned().unwrap() } @@ -95,7 +95,7 @@ impl From for crypto::Public { pub fn generate_equivocation_proof( vote1: (u64, Payload, ValidatorSetId, &Keyring), vote2: (u64, Payload, ValidatorSetId, &Keyring), -) -> EquivocationProof { +) -> EquivocationProof { let signed_vote = |block_number: u64, payload: Payload, validator_set_id: ValidatorSetId, diff --git a/primitives/consensus/beefy/src/witness.rs b/primitives/consensus/beefy/src/witness.rs index ff9a0401dc632..3f2c2bcbe2829 100644 --- a/primitives/consensus/beefy/src/witness.rs +++ 
b/primitives/consensus/beefy/src/witness.rs @@ -37,18 +37,21 @@ use crate::commitment::{Commitment, SignedCommitment}; /// Ethereum Mainnet), in a commit-reveal like scheme, where first we submit only the signed /// commitment witness and later on, the client picks only some signatures to verify at random. #[derive(Debug, PartialEq, Eq, codec::Encode, codec::Decode)] -pub struct SignedCommitmentWitness { +pub struct SignedCommitmentWitness { /// The full content of the commitment. pub commitment: Commitment, /// The bit vector of validators who signed the commitment. pub signed_by: Vec, // TODO [ToDr] Consider replacing with bitvec crate - /// A merkle root of signatures in the original signed commitment. - pub signatures_merkle_root: TMerkleRoot, + /// Either a merkle root of signatures in the original signed commitment or a single aggregated + /// BLS signature aggregating all original signatures. + pub signature_accumulator: TSignatureAccumulator, } -impl SignedCommitmentWitness { +impl + SignedCommitmentWitness +{ /// Convert [SignedCommitment] into [SignedCommitmentWitness]. 
/// /// This takes a [SignedCommitment], which contains full signatures @@ -57,53 +60,85 @@ impl SignedCommitmentWitness( + pub fn from_signed( signed: SignedCommitment, - merkelize: TMerkelize, + aggregator: TSignatureAggregator, ) -> (Self, Vec>) where - TMerkelize: FnOnce(&[Option]) -> TMerkleRoot, + TSignatureAggregator: FnOnce(&[Option]) -> TSignatureAccumulator, { let SignedCommitment { commitment, signatures } = signed; let signed_by = signatures.iter().map(|s| s.is_some()).collect(); - let signatures_merkle_root = merkelize(&signatures); + let signature_accumulator = aggregator(&signatures); - (Self { commitment, signed_by, signatures_merkle_root }, signatures) + (Self { commitment, signed_by, signature_accumulator }, signatures) } } #[cfg(test)] mod tests { use sp_core::{keccak_256, Pair}; - use sp_keystore::{testing::MemoryKeystore, KeystorePtr}; use super::*; use codec::Decode; - use crate::{crypto, known_payloads, Payload, KEY_TYPE}; + use crate::{ecdsa_crypto::Signature as EcdsaSignature, known_payloads, Payload}; + + #[cfg(feature = "bls-experimental")] + use crate::bls_crypto::Signature as BlsSignature; + + #[cfg(feature = "bls-experimental")] + use w3f_bls::{ + single_pop_aggregator::SignatureAggregatorAssumingPoP, Message, SerializableToBytes, + Signed, TinyBLS377, + }; type TestCommitment = Commitment; - type TestSignedCommitment = SignedCommitment; - type TestSignedCommitmentWitness = - SignedCommitmentWitness>>; - // The mock signatures are equivalent to the ones produced by the BEEFY keystore - fn mock_signatures() -> (crypto::Signature, crypto::Signature) { - let store: KeystorePtr = MemoryKeystore::new().into(); + // Types for ecdsa signed commitment. 
+ type TestEcdsaSignedCommitment = SignedCommitment; + type TestEcdsaSignedCommitmentWitness = + SignedCommitmentWitness>>; + + #[cfg(feature = "bls-experimental")] + #[derive(Clone, Debug, PartialEq, codec::Encode, codec::Decode)] + struct EcdsaBlsSignaturePair(EcdsaSignature, BlsSignature); + // types for commitment containing bls signature along side ecdsa signature + #[cfg(feature = "bls-experimental")] + type TestBlsSignedCommitment = SignedCommitment; + #[cfg(feature = "bls-experimental")] + type TestBlsSignedCommitmentWitness = SignedCommitmentWitness>; + + // The mock signatures are equivalent to the ones produced by the BEEFY keystore + fn mock_ecdsa_signatures() -> (EcdsaSignature, EcdsaSignature) { let alice = sp_core::ecdsa::Pair::from_string("//Alice", None).unwrap(); - store.insert(KEY_TYPE, "//Alice", alice.public().as_ref()).unwrap(); let msg = keccak_256(b"This is the first message"); - let sig1 = store.ecdsa_sign_prehashed(KEY_TYPE, &alice.public(), &msg).unwrap().unwrap(); + let sig1 = alice.sign_prehashed(&msg); let msg = keccak_256(b"This is the second message"); - let sig2 = store.ecdsa_sign_prehashed(KEY_TYPE, &alice.public(), &msg).unwrap().unwrap(); + let sig2 = alice.sign_prehashed(&msg); (sig1.into(), sig2.into()) } - fn signed_commitment() -> TestSignedCommitment { + // Generates mock aggregatable bls signature for generating test commitment + // BLS signatures + #[cfg(feature = "bls-experimental")] + fn mock_bls_signatures() -> (BlsSignature, BlsSignature) { + let alice = sp_core::bls::Pair::from_string("//Alice", None).unwrap(); + + let msg = b"This is the first message"; + let sig1 = alice.sign(msg); + + let msg = b"This is the second message"; + let sig2 = alice.sign(msg); + + (sig1.into(), sig2.into()) + } + + fn ecdsa_signed_commitment() -> TestEcdsaSignedCommitment { let payload = Payload::from_single_entry( known_payloads::MMR_ROOT_ID, "Hello World!".as_bytes().to_vec(), @@ -111,35 +146,97 @@ mod tests { let commitment: 
TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; - let sigs = mock_signatures(); + let sigs = mock_ecdsa_signatures(); SignedCommitment { commitment, signatures: vec![None, None, Some(sigs.0), Some(sigs.1)] } } + #[cfg(feature = "bls-experimental")] + fn ecdsa_and_bls_signed_commitment() -> TestBlsSignedCommitment { + let payload = Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + "Hello World!".as_bytes().to_vec(), + ); + let commitment: TestCommitment = + Commitment { payload, block_number: 5, validator_set_id: 0 }; + + let ecdsa_sigs = mock_ecdsa_signatures(); + let bls_sigs = mock_bls_signatures(); + + SignedCommitment { + commitment, + signatures: vec![ + None, + None, + Some(EcdsaBlsSignaturePair(ecdsa_sigs.0, bls_sigs.0)), + Some(EcdsaBlsSignaturePair(ecdsa_sigs.1, bls_sigs.1)), + ], + } + } + #[test] fn should_convert_signed_commitment_to_witness() { // given - let signed = signed_commitment(); + let signed = ecdsa_signed_commitment(); // when let (witness, signatures) = - TestSignedCommitmentWitness::from_signed(signed, |sigs| sigs.to_vec()); + TestEcdsaSignedCommitmentWitness::from_signed(signed, |sigs| sigs.to_vec()); // then - assert_eq!(witness.signatures_merkle_root, signatures); + assert_eq!(witness.signature_accumulator, signatures); } #[test] - fn should_encode_and_decode_witness() { + #[cfg(feature = "bls-experimental")] + fn should_convert_dually_signed_commitment_to_witness() { // given - let signed = signed_commitment(); - let (witness, _) = TestSignedCommitmentWitness::from_signed(signed, |sigs| sigs.to_vec()); + let signed = ecdsa_and_bls_signed_commitment(); // when + let (witness, _signatures) = + // from signed take a function as the aggregator + TestBlsSignedCommitmentWitness::from_signed::<_, _>(signed, |sigs| { + // we are going to aggregate the signatures here + let mut aggregatedsigs: SignatureAggregatorAssumingPoP = + SignatureAggregatorAssumingPoP::new(Message::new(b"", b"mock payload")); + + 
for sig in sigs { + match sig { + Some(sig) => { + let serialized_sig : Vec = (*sig.1).to_vec(); + aggregatedsigs.add_signature( + &w3f_bls::Signature::::from_bytes( + serialized_sig.as_slice() + ).unwrap() + ); + }, + None => (), + } + } + (&aggregatedsigs).signature().to_bytes() + }); + + // We can't use BlsSignature::try_from because it expected 112Bytes (CP (64) + BLS 48) + // single signature while we are having a BLS aggregated signature corresponding to no CP. + w3f_bls::Signature::::from_bytes(witness.signature_accumulator.as_slice()) + .unwrap(); + } + + #[test] + fn should_encode_and_decode_witness() { + // Given + let signed = ecdsa_signed_commitment(); + let (witness, _) = TestEcdsaSignedCommitmentWitness::from_signed::<_, _>( + signed, + |sigs: &[std::option::Option]| sigs.to_vec(), + ); + + // When let encoded = codec::Encode::encode(&witness); - let decoded = TestSignedCommitmentWitness::decode(&mut &*encoded); + let decoded = TestEcdsaSignedCommitmentWitness::decode(&mut &*encoded); - // then + // Then assert_eq!(decoded, Ok(witness)); assert_eq!( encoded, diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 1179261340f6e..1f52fb1d44f78 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -18,10 +18,10 @@ async-trait = "0.1.57" futures = { version = "0.3.21", features = ["thread-pool"] } log = "0.4.17" thiserror = "1.0.30" -sp-core = { version = "7.0.0", path = "../../core" } +sp-core = { version = "21.0.0", path = "../../core" } sp-inherents = { version = "4.0.0-dev", path = "../../inherents" } -sp-runtime = { version = "7.0.0", path = "../../runtime" } -sp-state-machine = { version = "0.13.0", path = "../../state-machine" } +sp-runtime = { version = "24.0.0", path = "../../runtime" } +sp-state-machine = { version = "0.28.0", path = "../../state-machine" } [dev-dependencies] futures = "0.3.21" diff --git a/primitives/consensus/common/src/lib.rs 
b/primitives/consensus/common/src/lib.rs index 215b4448b4a8e..6505d005deb8d 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -25,7 +25,7 @@ use std::{sync::Arc, time::Duration}; use futures::prelude::*; use sp_runtime::{ - traits::{Block as BlockT, HashFor}, + traits::{Block as BlockT, HashingFor}, Digest, }; use sp_state_machine::StorageProof; @@ -71,16 +71,6 @@ pub enum BlockOrigin { File, } -impl From for sp_core::ExecutionContext { - fn from(origin: BlockOrigin) -> Self { - if origin == BlockOrigin::NetworkInitialSync { - sp_core::ExecutionContext::Syncing - } else { - sp_core::ExecutionContext::Importing - } - } -} - /// Environment for a Consensus instance. /// /// Creates proposer instance. @@ -101,13 +91,13 @@ pub trait Environment { } /// A proposal that is created by a [`Proposer`]. -pub struct Proposal { +pub struct Proposal { /// The block that was build. pub block: Block, /// Proof that was recorded while building the block. pub proof: Proof, /// The storage changes while building this block. - pub storage_changes: sp_state_machine::StorageChanges>, + pub storage_changes: sp_state_machine::StorageChanges>, } /// Error that is returned when [`ProofRecording`] requested to record a proof, @@ -187,10 +177,8 @@ mod private { pub trait Proposer { /// Error type which can occur when proposing or evaluating. type Error: From + std::error::Error + 'static; - /// The transaction type used by the backend. - type Transaction: Default + Send + 'static; /// Future that resolves to a committed proposal with an optional proof. 
- type Proposal: Future, Self::Error>> + type Proposal: Future, Self::Error>> + Send + Unpin + 'static; diff --git a/primitives/consensus/grandpa/Cargo.toml b/primitives/consensus/grandpa/Cargo.toml index a1846aa76ac1f..2d0ff26f89702 100644 --- a/primitives/consensus/grandpa/Cargo.toml +++ b/primitives/consensus/grandpa/Cargo.toml @@ -14,17 +14,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", features = ["derive", "alloc"], default-features = false, optional = true } +serde = { version = "1.0.163", features = ["derive", "alloc"], default-features = false, optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../application-crypto" } -sp-core = { version = "7.0.0", default-features = false, path = "../../core" } -sp-keystore = { version = "0.13.0", default-features = false, optional = true, path = "../../keystore" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../std" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../application-crypto" } +sp-core = { version = "21.0.0", default-features = false, path = "../../core" } +sp-keystore = { version = "0.27.0", default-features = false, optional = true, path = "../../keystore" } +sp-runtime = { version = "24.0.0", 
default-features = false, path = "../../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../std" } [features] default = ["std"] @@ -37,9 +37,9 @@ std = [ "sp-api/std", "sp-application-crypto/std", "sp-core/std", - "sp-keystore", - "sp-runtime/std", - "sp-std/std", + "sp-keystore/std", + "sp-runtime/std", + "sp-std/std" ] # Serde support without relying on std features. diff --git a/primitives/consensus/pow/Cargo.toml b/primitives/consensus/pow/Cargo.toml index 6cc832b5afa5f..1c8c75c4b1934 100644 --- a/primitives/consensus/pow/Cargo.toml +++ b/primitives/consensus/pow/Cargo.toml @@ -13,11 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } -sp-core = { version = "7.0.0", default-features = false, path = "../../core" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../core" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../std" } [features] default = ["std"] diff --git a/primitives/consensus/slots/Cargo.toml b/primitives/consensus/slots/Cargo.toml index 29aa3c10c2f55..a0404f46abffa 100644 --- a/primitives/consensus/slots/Cargo.toml +++ b/primitives/consensus/slots/Cargo.toml @@ -13,10 +13,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package 
= "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0", default-features = false, features = ["derive", "alloc"], optional = true } -sp-std = { version = "5.0.0", default-features = false, path = "../../std" } +sp-std = { version = "8.0.0", default-features = false, path = "../../std" } sp-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../timestamp" } [features] diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index d0ec98b1c3080..ee4bf8924186c 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-core" -version = "7.0.0" +version = "21.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -13,11 +13,12 @@ documentation = "https://docs.rs/sp-core" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive","max-encoded-len"] } +arrayvec = { version = "0.7.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive","max-encoded-len"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -serde = { version = "1.0.136", optional = true, default-features = false, features = ["derive", "alloc"] } -bounded-collections = { version = "0.1.4", default-features = false } +serde = { version = "1.0.163", optional = true, default-features = false, features = ["derive", "alloc"] } +bounded-collections = { version = "0.1.8", default-features = false } primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info"] } impl-serde = { version = "0.4.0", default-features = false, optional = 
true } hash-db = { version = "0.16.0", default-features = false } @@ -32,35 +33,37 @@ secrecy = { version = "0.8.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.12.1", optional = true } ss58-registry = { version = "1.34.0", default-features = false } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } -sp-debug-derive = { version = "5.0.0", default-features = false, path = "../debug-derive" } -sp-storage = { version = "7.0.0", default-features = false, path = "../storage" } -sp-externalities = { version = "0.13.0", optional = true, path = "../externalities" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } +sp-debug-derive = { version = "8.0.0", default-features = false, path = "../debug-derive" } +sp-storage = { version = "13.0.0", default-features = false, path = "../storage" } +sp-externalities = { version = "0.19.0", optional = true, path = "../externalities" } futures = { version = "0.3.21", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { version = "1.0.30", optional = true } +tracing = { version = "0.1.29", optional = true } bitflags = "1.3" paste = "1.0.7" # full crypto -array-bytes = { version = "4.1", optional = true } +array-bytes = { version = "6.1", optional = true } ed25519-zebra = { version = "3.1.0", default-features = false, optional = true } blake2 = { version = "0.10.4", default-features = false, optional = true } libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } merlin = { version = "2.0", default-features = false } secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"], optional = true } -sp-core-hashing = { version = "5.0.0", path = "./hashing", default-features = 
false, optional = true } -sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../runtime-interface" } +sp-core-hashing = { version = "9.0.0", path = "./hashing", default-features = false, optional = true } +sp-runtime-interface = { version = "17.0.0", default-features = false, path = "../runtime-interface" } + # bls crypto w3f-bls = { version = "0.1.3", default-features = false, optional = true} +# bandersnatch crypto +bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "c86ebd4", default-features = false, optional = true } [dev-dependencies] -rand = "0.8.5" criterion = "0.4.0" serde_json = "1.0" -sp-core-hashing-proc-macro = { version = "5.0.0", path = "./hashing/proc-macro" } -hex-literal = "0.3.4" +sp-core-hashing-proc-macro = { version = "9.0.0", path = "./hashing/proc-macro" } [[bench]] name = "bench" @@ -72,12 +75,14 @@ bench = false [features] default = ["std"] std = [ + "arrayvec/std", "merlin/std", "full_crypto", "log/std", "thiserror", "lazy_static", "parking_lot", + "bandersnatch_vrfs/getrandom", "bounded-collections/std", "primitive-types/std", "primitive-types/serde", @@ -103,7 +108,6 @@ std = [ "secp256k1/global-context", "sp-core-hashing/std", "sp-debug-derive/std", - "sp-externalities", "sp-storage/std", "sp-runtime-interface/std", "ss58-registry/std", @@ -113,6 +117,8 @@ std = [ "futures/thread-pool", "libsecp256k1/std", "dyn-clonable", + "tracing", + "sp-externalities/std" ] # Serde support without relying on std features. @@ -120,6 +126,7 @@ serde = [ "dep:serde", "array-bytes", "blake2", + "bounded-collections/serde", "bs58/alloc", "scale-info/serde", "secrecy/alloc", @@ -142,6 +149,12 @@ full_crypto = [ "sp-runtime-interface/disable_target_static_assertions", ] -# This feature adds BLS crypto primitives. It should not be used in production since -# the BLS implementation and interface may still be subject to significant change. +# This feature adds BLS crypto primitives. 
+# It should not be used in production since the implementation and interface may still +# be subject to significant changes. bls-experimental = ["w3f-bls"] + +# This feature adds Bandersnatch crypto primitives. +# It should not be used in production since the implementation and interface may still +# be subject to significant changes. +bandersnatch-experimental = ["bandersnatch_vrfs"] diff --git a/primitives/core/hashing/Cargo.toml b/primitives/core/hashing/Cargo.toml index a77aaaa7aebc6..43a13883358ea 100644 --- a/primitives/core/hashing/Cargo.toml +++ b/primitives/core/hashing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-core-hashing" -version = "5.0.0" +version = "9.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -16,10 +16,9 @@ targets = ["x86_64-unknown-linux-gnu"] blake2b_simd = { version = "1.0.1", default-features = false } byteorder = { version = "1.3.2", default-features = false } digest = { version = "0.10.3", default-features = false } -sha2 = { version = "0.10.2", default-features = false } +sha2 = { version = "0.10.7", default-features = false } sha3 = { version = "0.10.0", default-features = false } twox-hash = { version = "1.6.3", default-features = false, features = ["digest_0_10"] } -sp-std = { version = "5.0.0", default-features = false, path = "../../std" } [features] default = ["std"] @@ -29,6 +28,5 @@ std = [ "byteorder/std", "sha2/std", "sha3/std", - "sp-std/std", "twox-hash/std", ] diff --git a/primitives/core/hashing/proc-macro/Cargo.toml b/primitives/core/hashing/proc-macro/Cargo.toml index 66e9667c8468c..35bb78249d110 100644 --- a/primitives/core/hashing/proc-macro/Cargo.toml +++ b/primitives/core/hashing/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-core-hashing-proc-macro" -version = "5.0.0" +version = "9.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -16,7 +16,6 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro2 
= "1.0.56" -quote = "1.0.26" -syn = { version = "2.0.14", features = ["full", "parsing"] } -sp-core-hashing = { version = "5.0.0", default-features = false, path = "../" } +quote = "1.0.28" +syn = { version = "2.0.16", features = ["full", "parsing"] } +sp-core-hashing = { version = "9.0.0", default-features = false, path = "../" } diff --git a/primitives/core/src/bandersnatch.rs b/primitives/core/src/bandersnatch.rs new file mode 100644 index 0000000000000..c3ba7f41058e9 --- /dev/null +++ b/primitives/core/src/bandersnatch.rs @@ -0,0 +1,1042 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! VRFs backed by [Bandersnatch](https://neuromancer.sk/std/bls/Bandersnatch), +//! an elliptic curve built over BLS12-381 scalar field. +//! +//! The primitive can operate both as a traditional VRF or as an anonymized ring VRF. 
+ +#[cfg(feature = "std")] +use crate::crypto::Ss58Codec; +use crate::crypto::{ + ByteArray, CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom, VrfPublic, +}; +#[cfg(feature = "full_crypto")] +use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError, VrfSecret}; + +use bandersnatch_vrfs::CanonicalSerialize; +#[cfg(feature = "full_crypto")] +use bandersnatch_vrfs::SecretKey; +use codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; + +use sp_runtime_interface::pass_by::PassByInner; +use sp_std::{boxed::Box, vec::Vec}; + +/// Identifier used to match public keys against bandersnatch-vrf keys. +pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"band"); + +/// Context used to produce a plain signature without any VRF input/output. +#[cfg(feature = "full_crypto")] +pub const SIGNING_CTX: &[u8] = b"SigningContext"; + +// Max ring domain size. +const RING_DOMAIN_SIZE: usize = 1024; + +#[cfg(feature = "full_crypto")] +const SEED_SERIALIZED_LEN: usize = 32; + +// Short-Weierstrass form serialized sizes. +const PUBLIC_SERIALIZED_LEN: usize = 33; +const SIGNATURE_SERIALIZED_LEN: usize = 65; +const PREOUT_SERIALIZED_LEN: usize = 33; +const PEDERSEN_SIGNATURE_SERIALIZED_LEN: usize = 163; +const RING_PROOF_SERIALIZED_LEN: usize = 592; + +// Max size of serialized ring-vrf context params. +// +// This size is dependent on the ring domain size and the actual value +// is equal to the SCALE encoded size of the `KZG` backend. +// +// Some values: +// ring_size → ~serialized_size +// 512 → 74 KB +// 1024 → 147 KB +// 2048 → 295 KB +// NOTE: This is quite big but looks like there is an upcoming fix +// in the backend. +const RING_CONTEXT_SERIALIZED_LEN: usize = 147752; + +/// Bandersnatch public key. 
+#[cfg_attr(feature = "full_crypto", derive(Hash))] +#[derive( + Clone, + Copy, + PartialEq, + Eq, + PartialOrd, + Ord, + Encode, + Decode, + PassByInner, + MaxEncodedLen, + TypeInfo, +)] +pub struct Public(pub [u8; PUBLIC_SERIALIZED_LEN]); + +impl UncheckedFrom<[u8; PUBLIC_SERIALIZED_LEN]> for Public { + fn unchecked_from(raw: [u8; PUBLIC_SERIALIZED_LEN]) -> Self { + Public(raw) + } +} + +impl AsRef<[u8; PUBLIC_SERIALIZED_LEN]> for Public { + fn as_ref(&self) -> &[u8; PUBLIC_SERIALIZED_LEN] { + &self.0 + } +} + +impl AsRef<[u8]> for Public { + fn as_ref(&self) -> &[u8] { + &self.0[..] + } +} + +impl AsMut<[u8]> for Public { + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] + } +} + +impl TryFrom<&[u8]> for Public { + type Error = (); + + fn try_from(data: &[u8]) -> Result { + if data.len() != PUBLIC_SERIALIZED_LEN { + return Err(()) + } + let mut r = [0u8; PUBLIC_SERIALIZED_LEN]; + r.copy_from_slice(data); + Ok(Self::unchecked_from(r)) + } +} + +impl ByteArray for Public { + const LEN: usize = PUBLIC_SERIALIZED_LEN; +} + +impl TraitPublic for Public {} + +impl CryptoType for Public { + #[cfg(feature = "full_crypto")] + type Pair = Pair; +} + +impl Derive for Public {} + +impl sp_std::fmt::Debug for Public { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + let s = self.to_ss58check(); + write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.as_ref()), &s[0..8]) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +/// Bandersnatch signature. +/// +/// The signature is created via the [`VrfSecret::vrf_sign`] using [`SIGNING_CTX`] as `label`. 
+#[cfg_attr(feature = "full_crypto", derive(Hash))] +#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, PassByInner, MaxEncodedLen, TypeInfo)] +pub struct Signature([u8; SIGNATURE_SERIALIZED_LEN]); + +impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_LEN]> for Signature { + fn unchecked_from(raw: [u8; SIGNATURE_SERIALIZED_LEN]) -> Self { + Signature(raw) + } +} + +impl AsRef<[u8]> for Signature { + fn as_ref(&self) -> &[u8] { + &self.0[..] + } +} + +impl AsMut<[u8]> for Signature { + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] + } +} + +impl TryFrom<&[u8]> for Signature { + type Error = (); + + fn try_from(data: &[u8]) -> Result { + if data.len() != SIGNATURE_SERIALIZED_LEN { + return Err(()) + } + let mut r = [0u8; SIGNATURE_SERIALIZED_LEN]; + r.copy_from_slice(data); + Ok(Self::unchecked_from(r)) + } +} + +impl ByteArray for Signature { + const LEN: usize = SIGNATURE_SERIALIZED_LEN; +} + +impl CryptoType for Signature { + #[cfg(feature = "full_crypto")] + type Pair = Pair; +} + +impl sp_std::fmt::Debug for Signature { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +/// The raw secret seed, which can be used to reconstruct the secret [`Pair`]. +#[cfg(feature = "full_crypto")] +type Seed = [u8; SEED_SERIALIZED_LEN]; + +/// Bandersnatch secret key. +#[cfg(feature = "full_crypto")] +#[derive(Clone)] +pub struct Pair { + secret: SecretKey, + seed: Seed, +} + +#[cfg(feature = "full_crypto")] +impl Pair { + /// Get the key seed. + pub fn seed(&self) -> Seed { + self.seed + } +} + +#[cfg(feature = "full_crypto")] +impl TraitPair for Pair { + type Seed = Seed; + type Public = Public; + type Signature = Signature; + + /// Make a new key pair from secret seed material. 
+ /// + /// The slice must be 64 bytes long or it will return an error. + fn from_seed_slice(seed_slice: &[u8]) -> Result { + if seed_slice.len() != SEED_SERIALIZED_LEN { + return Err(SecretStringError::InvalidSeedLength) + } + let mut seed = [0; SEED_SERIALIZED_LEN]; + seed.copy_from_slice(seed_slice); + let secret = SecretKey::from_seed(&seed); + Ok(Pair { secret, seed }) + } + + /// Derive a child key from a series of given (hard) junctions. + /// + /// Soft junctions are not supported. + fn derive>( + &self, + path: Iter, + _seed: Option, + ) -> Result<(Pair, Option), DeriveError> { + let derive_hard = |seed, cc| -> Seed { + ("bandersnatch-vrf-HDKD", seed, cc).using_encoded(sp_core_hashing::blake2_256) + }; + + let mut seed = self.seed(); + for p in path { + if let DeriveJunction::Hard(cc) = p { + seed = derive_hard(seed, cc); + } else { + return Err(DeriveError::SoftKeyInPath) + } + } + Ok((Self::from_seed(&seed), Some(seed))) + } + + /// Get the public key. + fn public(&self) -> Public { + let public = self.secret.to_public(); + let mut raw = [0; PUBLIC_SERIALIZED_LEN]; + public + .serialize_compressed(raw.as_mut_slice()) + .expect("key buffer length is good; qed"); + Public::unchecked_from(raw) + } + + /// Sign raw data. + fn sign(&self, data: &[u8]) -> Signature { + let data = vrf::VrfSignData::new_unchecked(SIGNING_CTX, &[data], None); + self.vrf_sign(&data).signature + } + + /// Verify a signature on a message. + /// + /// Returns `true` if the signature is good. + fn verify>(signature: &Signature, data: M, public: &Public) -> bool { + let data = vrf::VrfSignData::new_unchecked(SIGNING_CTX, &[data.as_ref()], None); + let signature = + vrf::VrfSignature { signature: *signature, vrf_outputs: vrf::VrfIosVec::default() }; + public.vrf_verify(&data, &signature) + } + + /// Return a vector filled with seed raw data. 
+ fn to_raw_vec(&self) -> Vec { + self.seed().to_vec() + } +} + +#[cfg(feature = "full_crypto")] +impl CryptoType for Pair { + type Pair = Pair; +} + +/// Bandersnatch VRF types and operations. +pub mod vrf { + use super::*; + use crate::{bounded::BoundedVec, crypto::VrfCrypto, ConstU32}; + use bandersnatch_vrfs::{ + CanonicalDeserialize, CanonicalSerialize, IntoVrfInput, Message, PublicKey, + ThinVrfSignature, Transcript, + }; + + /// Max number of inputs/outputs which can be handled by the VRF signing procedures. + /// The number is quite arbitrary and fullfils the current usage of the primitive. + /// If required it can be extended in the future. + pub const MAX_VRF_IOS: u32 = 3; + + /// Bounded vector used for VRF inputs and outputs. + /// + /// Can contain at most [`MAX_VRF_IOS`] elements. + pub type VrfIosVec = BoundedVec>; + + /// VRF input to construct a [`VrfOutput`] instance and embeddable within [`VrfSignData`]. + #[derive(Clone, Debug)] + pub struct VrfInput(pub(super) bandersnatch_vrfs::VrfInput); + + impl VrfInput { + /// Construct a new VRF input. + pub fn new(domain: impl AsRef<[u8]>, data: impl AsRef<[u8]>) -> Self { + let msg = Message { domain: domain.as_ref(), message: data.as_ref() }; + VrfInput(msg.into_vrf_input()) + } + } + + /// VRF (pre)output derived from [`VrfInput`] using a [`VrfSecret`]. + /// + /// This is used to produce an arbitrary number of verifiable *random* bytes. 
+ #[derive(Clone, Debug, PartialEq, Eq)] + pub struct VrfOutput(pub(super) bandersnatch_vrfs::VrfPreOut); + + impl Encode for VrfOutput { + fn encode(&self) -> Vec { + let mut bytes = [0; PREOUT_SERIALIZED_LEN]; + self.0 + .serialize_compressed(bytes.as_mut_slice()) + .expect("preout serialization can't fail"); + bytes.encode() + } + } + + impl Decode for VrfOutput { + fn decode(i: &mut R) -> Result { + let buf = <[u8; PREOUT_SERIALIZED_LEN]>::decode(i)?; + let preout = bandersnatch_vrfs::VrfPreOut::deserialize_compressed(buf.as_slice()) + .map_err(|_| "vrf-preout decode error: bad preout")?; + Ok(VrfOutput(preout)) + } + } + + impl MaxEncodedLen for VrfOutput { + fn max_encoded_len() -> usize { + <[u8; PREOUT_SERIALIZED_LEN]>::max_encoded_len() + } + } + + impl TypeInfo for VrfOutput { + type Identity = [u8; PREOUT_SERIALIZED_LEN]; + + fn type_info() -> scale_info::Type { + Self::Identity::type_info() + } + } + + /// A *Fiat-Shamir* transcript and a sequence of [`VrfInput`]s ready to be signed. + /// + /// The `transcript` will be used as messages for the *Fiat-Shamir* + /// transform part of the scheme. This data keeps the signature secure + /// but doesn't contribute to the actual VRF output. If unsure just give + /// it a unique label depending on the actual usage of the signing data. + /// + /// The `vrf_inputs` is a sequence of [`VrfInput`]s to be signed and which + /// are used to construct the [`VrfOutput`]s in the signature. + #[derive(Clone)] + pub struct VrfSignData { + /// VRF inputs to be signed. + pub vrf_inputs: VrfIosVec, + /// Associated Fiat-Shamir transcript. + pub transcript: Transcript, + } + + impl VrfSignData { + /// Construct a new data to be signed. + /// + /// The `transcript_data` is used to construct the *Fiat-Shamir* `Transcript`. 
+ /// Fails if the `vrf_inputs` yields more elements than [`MAX_VRF_IOS`] + /// + /// Refer to the [`VrfSignData`] for more details about the usage of + /// `transcript_data` and `vrf_inputs` + pub fn new( + label: &'static [u8], + transcript_data: impl IntoIterator>, + vrf_inputs: impl IntoIterator, + ) -> Result { + let vrf_inputs: Vec = vrf_inputs.into_iter().collect(); + if vrf_inputs.len() > MAX_VRF_IOS as usize { + return Err(()) + } + Ok(Self::new_unchecked(label, transcript_data, vrf_inputs)) + } + + /// Construct a new data to be signed. + /// + /// The `transcript_data` is used to construct the *Fiat-Shamir* `Transcript`. + /// At most the first [`MAX_VRF_IOS`] elements of `vrf_inputs` are used. + /// + /// Refer to the [`VrfSignData`] for more details about the usage of + /// `transcript_data` and `vrf_inputs` + pub fn new_unchecked( + label: &'static [u8], + transcript_data: impl IntoIterator>, + vrf_inputs: impl IntoIterator, + ) -> Self { + let vrf_inputs: Vec = vrf_inputs.into_iter().collect(); + let vrf_inputs = VrfIosVec::truncate_from(vrf_inputs); + let mut transcript = Transcript::new_labeled(label); + transcript_data + .into_iter() + .for_each(|data| transcript.append_slice(data.as_ref())); + VrfSignData { transcript, vrf_inputs } + } + + /// Append a raw message to the transcript. + pub fn push_transcript_data(&mut self, data: &[u8]) { + self.transcript.append_slice(data); + } + + /// Append a [`VrfInput`] to the vrf inputs to be signed. + /// + /// On failure, gives back the [`VrfInput`] parameter. + pub fn push_vrf_input(&mut self, vrf_input: VrfInput) -> Result<(), VrfInput> { + self.vrf_inputs.try_push(vrf_input) + } + + /// Create challenge from the transcript contained within the signing data. 
+ pub fn challenge(&self) -> [u8; N] { + let mut output = [0; N]; + let mut transcript = self.transcript.clone(); + let mut reader = transcript.challenge(b"Prehashed for bandersnatch"); + reader.read_bytes(&mut output); + output + } + } + + /// VRF signature. + #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub struct VrfSignature { + /// VRF (pre)outputs. + pub vrf_outputs: VrfIosVec, + /// VRF signature. + pub signature: Signature, + } + + #[cfg(feature = "full_crypto")] + impl VrfCrypto for Pair { + type VrfInput = VrfInput; + type VrfOutput = VrfOutput; + type VrfSignData = VrfSignData; + type VrfSignature = VrfSignature; + } + + #[cfg(feature = "full_crypto")] + impl VrfSecret for Pair { + fn vrf_sign(&self, data: &Self::VrfSignData) -> Self::VrfSignature { + const _: () = assert!(MAX_VRF_IOS == 3, "`MAX_VRF_IOS` expected to be 3"); + // Workaround to overcome backend signature generic over the number of IOs. + match data.vrf_inputs.len() { + 0 => self.vrf_sign_gen::<0>(data), + 1 => self.vrf_sign_gen::<1>(data), + 2 => self.vrf_sign_gen::<2>(data), + 3 => self.vrf_sign_gen::<3>(data), + _ => unreachable!(), + } + } + + fn vrf_output(&self, input: &Self::VrfInput) -> Self::VrfOutput { + let output = self.secret.0.vrf_preout(&input.0); + VrfOutput(output) + } + } + + impl VrfCrypto for Public { + type VrfInput = VrfInput; + type VrfOutput = VrfOutput; + type VrfSignData = VrfSignData; + type VrfSignature = VrfSignature; + } + + impl VrfPublic for Public { + fn vrf_verify(&self, data: &Self::VrfSignData, signature: &Self::VrfSignature) -> bool { + const _: () = assert!(MAX_VRF_IOS == 3, "`MAX_VRF_IOS` expected to be 3"); + let preouts_len = signature.vrf_outputs.len(); + if preouts_len != data.vrf_inputs.len() { + return false + } + // Workaround to overcome backend signature generic over the number of IOs. 
+ match preouts_len { + 0 => self.vrf_verify_gen::<0>(data, signature), + 1 => self.vrf_verify_gen::<1>(data, signature), + 2 => self.vrf_verify_gen::<2>(data, signature), + 3 => self.vrf_verify_gen::<3>(data, signature), + _ => unreachable!(), + } + } + } + + #[cfg(feature = "full_crypto")] + impl Pair { + fn vrf_sign_gen(&self, data: &VrfSignData) -> VrfSignature { + let ios: Vec<_> = data + .vrf_inputs + .iter() + .map(|i| self.secret.clone().0.vrf_inout(i.0.clone())) + .collect(); + + let signature: ThinVrfSignature = + self.secret.sign_thin_vrf(data.transcript.clone(), ios.as_slice()); + + let mut sign_bytes = [0; SIGNATURE_SERIALIZED_LEN]; + signature + .signature + .serialize_compressed(sign_bytes.as_mut_slice()) + .expect("serialization can't fail"); + + let outputs: Vec<_> = signature.preoutputs.into_iter().map(VrfOutput).collect(); + let outputs = VrfIosVec::truncate_from(outputs); + VrfSignature { signature: Signature(sign_bytes), vrf_outputs: outputs } + } + + /// Generate an arbitrary number of bytes from the given `context` and VRF `input`. + pub fn make_bytes( + &self, + context: &'static [u8], + input: &VrfInput, + ) -> [u8; N] { + let transcript = Transcript::new_labeled(context); + let inout = self.secret.clone().0.vrf_inout(input.0.clone()); + inout.vrf_output_bytes(transcript) + } + } + + impl Public { + fn vrf_verify_gen( + &self, + data: &VrfSignData, + signature: &VrfSignature, + ) -> bool { + let Ok(public) = PublicKey::deserialize_compressed(self.as_slice()) else { + return false + }; + + let Ok(preouts) = signature + .vrf_outputs + .iter() + .map(|o| o.0.clone()) + .collect::>() + .into_inner() + else { + return false + }; + + // Deserialize only the proof, the rest has already been deserialized + // This is another hack used because backend signature type is generic over + // the number of ios. 
+ let Ok(signature) = + ThinVrfSignature::<0>::deserialize_compressed(signature.signature.as_ref()) + .map(|s| s.signature) + else { + return false + }; + let signature = ThinVrfSignature { signature, preoutputs: preouts }; + + let inputs = data.vrf_inputs.iter().map(|i| i.0.clone()); + + signature.verify_thin_vrf(data.transcript.clone(), inputs, &public).is_ok() + } + } + + impl VrfOutput { + /// Generate an arbitrary number of bytes from the given `context` and VRF `input`. + pub fn make_bytes( + &self, + context: &'static [u8], + input: &VrfInput, + ) -> [u8; N] { + let transcript = Transcript::new_labeled(context); + let inout = + bandersnatch_vrfs::VrfInOut { input: input.0.clone(), preoutput: self.0.clone() }; + inout.vrf_output_bytes(transcript) + } + } +} + +/// Bandersnatch Ring-VRF types and operations. +pub mod ring_vrf { + use super::{vrf::*, *}; + pub use bandersnatch_vrfs::ring::{RingProof, RingProver, RingVerifier, KZG}; + use bandersnatch_vrfs::{CanonicalDeserialize, PedersenVrfSignature, PublicKey}; + + /// Context used to produce ring signatures. + #[derive(Clone)] + pub struct RingContext(KZG); + + impl RingContext { + /// Build an dummy instance used for testing purposes. + pub fn new_testing() -> Self { + Self(KZG::testing_kzg_setup([0; 32], RING_DOMAIN_SIZE as u32)) + } + + /// Get the keyset max size. + pub fn max_keyset_size(&self) -> usize { + self.0.max_keyset_size() + } + + /// Get ring prover for the key at index `public_idx` in the `public_keys` set. + pub fn prover(&self, public_keys: &[Public], public_idx: usize) -> Option { + let mut pks = Vec::with_capacity(public_keys.len()); + for public_key in public_keys { + let pk = PublicKey::deserialize_compressed(public_key.as_slice()).ok()?; + pks.push(pk.0 .0.into()); + } + + let prover_key = self.0.prover_key(pks); + let ring_prover = self.0.init_ring_prover(prover_key, public_idx); + Some(ring_prover) + } + + /// Get ring verifier for the `public_keys` set. 
+ pub fn verifier(&self, public_keys: &[Public]) -> Option { + let mut pks = Vec::with_capacity(public_keys.len()); + for public_key in public_keys { + let pk = PublicKey::deserialize_compressed(public_key.as_slice()).ok()?; + pks.push(pk.0 .0.into()); + } + + let verifier_key = self.0.verifier_key(pks); + let ring_verifier = self.0.init_ring_verifier(verifier_key); + Some(ring_verifier) + } + } + + impl Encode for RingContext { + fn encode(&self) -> Vec { + let mut buf = Box::new([0; RING_CONTEXT_SERIALIZED_LEN]); + self.0 + .serialize_compressed(buf.as_mut_slice()) + .expect("preout serialization can't fail"); + buf.encode() + } + } + + impl Decode for RingContext { + fn decode(i: &mut R) -> Result { + let buf = >::decode(i)?; + let kzg = + KZG::deserialize_compressed(buf.as_slice()).map_err(|_| "KZG decode error")?; + Ok(RingContext(kzg)) + } + } + + impl MaxEncodedLen for RingContext { + fn max_encoded_len() -> usize { + <[u8; RING_CONTEXT_SERIALIZED_LEN]>::max_encoded_len() + } + } + + impl TypeInfo for RingContext { + type Identity = [u8; RING_CONTEXT_SERIALIZED_LEN]; + + fn type_info() -> scale_info::Type { + Self::Identity::type_info() + } + } + + /// Ring VRF signature. + #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub struct RingVrfSignature { + /// VRF (pre)outputs. + pub outputs: VrfIosVec, + /// Pedersen VRF signature. + signature: [u8; PEDERSEN_SIGNATURE_SERIALIZED_LEN], + /// Ring proof. + ring_proof: [u8; RING_PROOF_SERIALIZED_LEN], + } + + #[cfg(feature = "full_crypto")] + impl Pair { + /// Produce a ring-vrf signature. + /// + /// The ring signature is verifiable if the public key corresponding to the + /// signing [`Pair`] is part of the ring from which the [`RingProver`] has + /// been constructed. If not, the produced signature is just useless. 
+ pub fn ring_vrf_sign(&self, data: &VrfSignData, prover: &RingProver) -> RingVrfSignature { + const _: () = assert!(MAX_VRF_IOS == 3, "`MAX_VRF_IOS` expected to be 3"); + // Workaround to overcome backend signature generic over the number of IOs. + match data.vrf_inputs.len() { + 0 => self.ring_vrf_sign_gen::<0>(data, prover), + 1 => self.ring_vrf_sign_gen::<1>(data, prover), + 2 => self.ring_vrf_sign_gen::<2>(data, prover), + 3 => self.ring_vrf_sign_gen::<3>(data, prover), + _ => unreachable!(), + } + } + + fn ring_vrf_sign_gen( + &self, + data: &VrfSignData, + prover: &RingProver, + ) -> RingVrfSignature { + let ios: Vec<_> = data + .vrf_inputs + .iter() + .map(|i| self.secret.clone().0.vrf_inout(i.0.clone())) + .collect(); + + let ring_signature: bandersnatch_vrfs::RingVrfSignature = + self.secret.sign_ring_vrf(data.transcript.clone(), ios.as_slice(), prover); + + let outputs: Vec<_> = ring_signature.preoutputs.into_iter().map(VrfOutput).collect(); + let outputs = VrfIosVec::truncate_from(outputs); + + let mut signature = [0; PEDERSEN_SIGNATURE_SERIALIZED_LEN]; + ring_signature + .signature + .serialize_compressed(signature.as_mut_slice()) + .expect("ped-signature serialization can't fail"); + + let mut ring_proof = [0; RING_PROOF_SERIALIZED_LEN]; + ring_signature + .ring_proof + .serialize_compressed(ring_proof.as_mut_slice()) + .expect("ring-proof serialization can't fail"); + + RingVrfSignature { outputs, signature, ring_proof } + } + } + + impl RingVrfSignature { + /// Verify a ring-vrf signature. + /// + /// The signature is verifiable if it has been produced by a member of the ring + /// from which the [`RingVerifier`] has been constructed. 
+ pub fn verify(&self, data: &VrfSignData, verifier: &RingVerifier) -> bool { + const _: () = assert!(MAX_VRF_IOS == 3, "`MAX_VRF_IOS` expected to be 3"); + let preouts_len = self.outputs.len(); + if preouts_len != data.vrf_inputs.len() { + return false + } + // Workaround to overcome backend signature generic over the number of IOs. + match preouts_len { + 0 => self.verify_gen::<0>(data, verifier), + 1 => self.verify_gen::<1>(data, verifier), + 2 => self.verify_gen::<2>(data, verifier), + 3 => self.verify_gen::<3>(data, verifier), + _ => unreachable!(), + } + } + + fn verify_gen(&self, data: &VrfSignData, verifier: &RingVerifier) -> bool { + let Ok(preoutputs) = self + .outputs + .iter() + .map(|o| o.0.clone()) + .collect::>() + .into_inner() + else { + return false + }; + + let Ok(signature) = + PedersenVrfSignature::deserialize_compressed(self.signature.as_slice()) + else { + return false + }; + + let Ok(ring_proof) = RingProof::deserialize_compressed(self.ring_proof.as_slice()) + else { + return false + }; + + let ring_signature = + bandersnatch_vrfs::RingVrfSignature { signature, preoutputs, ring_proof }; + + let inputs = data.vrf_inputs.iter().map(|i| i.0.clone()); + + ring_signature + .verify_ring_vrf(data.transcript.clone(), inputs, verifier) + .is_ok() + } + } +} + +#[cfg(test)] +mod tests { + use super::{ring_vrf::*, vrf::*, *}; + use crate::crypto::{VrfPublic, VrfSecret, DEV_PHRASE}; + const DEV_SEED: &[u8; SEED_SERIALIZED_LEN] = &[0xcb; SEED_SERIALIZED_LEN]; + + #[allow(unused)] + fn b2h(bytes: &[u8]) -> String { + array_bytes::bytes2hex("", bytes) + } + + fn h2b(hex: &str) -> Vec { + array_bytes::hex2bytes_unchecked(hex) + } + + #[test] + fn assumptions_sanity_check() { + // Backend + let ring_ctx = RingContext::new_testing(); + let pair = SecretKey::from_seed(DEV_SEED); + let public = pair.to_public(); + + assert_eq!(public.0.size_of_serialized(), PUBLIC_SERIALIZED_LEN); + assert_eq!(ring_ctx.max_keyset_size(), RING_DOMAIN_SIZE - 257); + + // Wrapper 
+ let inputs: Vec<_> = (0..MAX_VRF_IOS - 1).map(|_| VrfInput::new(b"", &[])).collect(); + let mut sign_data = VrfSignData::new(b"", &[b""], inputs).unwrap(); + let res = sign_data.push_vrf_input(VrfInput::new(b"", b"")); + assert!(res.is_ok()); + let res = sign_data.push_vrf_input(VrfInput::new(b"", b"")); + assert!(res.is_err()); + let inputs: Vec<_> = (0..MAX_VRF_IOS + 1).map(|_| VrfInput::new(b"", b"")).collect(); + let res = VrfSignData::new(b"mydata", &[b"tdata"], inputs); + assert!(res.is_err()); + } + + #[test] + fn derive_works() { + let pair = Pair::from_string(&format!("{}//Alice//Hard", DEV_PHRASE), None).unwrap(); + let known = h2b("2b340c18b94dc1916979cb83daf3ed4ac106742ddc06afc42cf26be3b18a523f80"); + assert_eq!(pair.public().as_ref(), known); + + // Soft derivation not supported + let res = Pair::from_string(&format!("{}//Alice/Soft", DEV_PHRASE), None); + assert!(res.is_err()); + } + + #[test] + fn sign_verify() { + let pair = Pair::from_seed(DEV_SEED); + let public = pair.public(); + let msg = b"hello"; + + let signature = pair.sign(msg); + assert!(Pair::verify(&signature, msg, &public)); + } + + #[test] + fn vrf_sign_verify() { + let pair = Pair::from_seed(DEV_SEED); + let public = pair.public(); + + let i1 = VrfInput::new(b"dom1", b"foo"); + let i2 = VrfInput::new(b"dom2", b"bar"); + let i3 = VrfInput::new(b"dom3", b"baz"); + + let data = VrfSignData::new_unchecked(b"mydata", &[b"tdata"], [i1, i2, i3]); + + let signature = pair.vrf_sign(&data); + + assert!(public.vrf_verify(&data, &signature)); + } + + #[test] + fn vrf_sign_verify_bad_inputs() { + let pair = Pair::from_seed(DEV_SEED); + let public = pair.public(); + + let i1 = VrfInput::new(b"dom1", b"foo"); + let i2 = VrfInput::new(b"dom2", b"bar"); + + let data = VrfSignData::new_unchecked(b"mydata", &[b"aaaa"], [i1.clone(), i2.clone()]); + let signature = pair.vrf_sign(&data); + + let data = VrfSignData::new_unchecked(b"mydata", &[b"bbb"], [i1, i2.clone()]); + assert!(!public.vrf_verify(&data, 
&signature)); + + let data = VrfSignData::new_unchecked(b"mydata", &[b"aaa"], [i2]); + assert!(!public.vrf_verify(&data, &signature)); + } + + #[test] + fn vrf_make_bytes_matches() { + let pair = Pair::from_seed(DEV_SEED); + + let i1 = VrfInput::new(b"dom1", b"foo"); + let i2 = VrfInput::new(b"dom2", b"bar"); + + let data = VrfSignData::new_unchecked(b"mydata", &[b"tdata"], [i1.clone(), i2.clone()]); + let signature = pair.vrf_sign(&data); + + let o10 = pair.make_bytes::<32>(b"ctx1", &i1); + let o11 = signature.vrf_outputs[0].make_bytes::<32>(b"ctx1", &i1); + assert_eq!(o10, o11); + + let o20 = pair.make_bytes::<48>(b"ctx2", &i2); + let o21 = signature.vrf_outputs[1].make_bytes::<48>(b"ctx2", &i2); + assert_eq!(o20, o21); + } + + #[test] + fn encode_decode_vrf_signature() { + // Transcript data is hashed together and signed. + // It doesn't contribute to serialized length. + let pair = Pair::from_seed(DEV_SEED); + + let i1 = VrfInput::new(b"dom1", b"foo"); + let i2 = VrfInput::new(b"dom2", b"bar"); + + let data = VrfSignData::new_unchecked(b"mydata", &[b"tdata"], [i1.clone(), i2.clone()]); + let expected = pair.vrf_sign(&data); + + let bytes = expected.encode(); + + let expected_len = + data.vrf_inputs.len() * PREOUT_SERIALIZED_LEN + SIGNATURE_SERIALIZED_LEN + 1; + assert_eq!(bytes.len(), expected_len); + + let decoded = VrfSignature::decode(&mut bytes.as_slice()).unwrap(); + assert_eq!(expected, decoded); + + let data = VrfSignData::new_unchecked(b"mydata", &[b"tdata"], []); + let expected = pair.vrf_sign(&data); + + let bytes = expected.encode(); + + let decoded = VrfSignature::decode(&mut bytes.as_slice()).unwrap(); + assert_eq!(expected, decoded); + } + + #[test] + fn ring_vrf_sign_verify() { + let ring_ctx = RingContext::new_testing(); + + let mut pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); + assert!(pks.len() <= ring_ctx.max_keyset_size()); + + let pair = Pair::from_seed(DEV_SEED); + + // Just pick one index to patch 
with the actual public key + let prover_idx = 3; + pks[prover_idx] = pair.public(); + + let i1 = VrfInput::new(b"dom1", b"foo"); + let i2 = VrfInput::new(b"dom2", b"bar"); + let i3 = VrfInput::new(b"dom3", b"baz"); + + let data = VrfSignData::new_unchecked(b"mydata", &[b"tdata"], [i1, i2, i3]); + + let prover = ring_ctx.prover(&pks, prover_idx).unwrap(); + let signature = pair.ring_vrf_sign(&data, &prover); + + let verifier = ring_ctx.verifier(&pks).unwrap(); + assert!(signature.verify(&data, &verifier)); + } + + #[test] + fn ring_vrf_sign_verify_with_out_of_ring_key() { + let ring_ctx = RingContext::new_testing(); + + let pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); + let pair = Pair::from_seed(DEV_SEED); + + // Just pick one index to patch with the actual public key + let i1 = VrfInput::new(b"dom1", b"foo"); + let data = VrfSignData::new_unchecked(b"mydata", Some(b"tdata"), Some(i1)); + + // pair.public != pks[0] + let prover = ring_ctx.prover(&pks, 0).unwrap(); + let signature = pair.ring_vrf_sign(&data, &prover); + + let verifier = ring_ctx.verifier(&pks).unwrap(); + assert!(!signature.verify(&data, &verifier)); + } + + #[test] + fn encode_decode_ring_vrf_signature() { + let ring_ctx = RingContext::new_testing(); + + let mut pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); + assert!(pks.len() <= ring_ctx.max_keyset_size()); + + let pair = Pair::from_seed(DEV_SEED); + + // Just pick one... 
+ let prover_idx = 3; + pks[prover_idx] = pair.public(); + + let i1 = VrfInput::new(b"dom1", b"foo"); + let i2 = VrfInput::new(b"dom2", b"bar"); + let i3 = VrfInput::new(b"dom3", b"baz"); + + let data = VrfSignData::new_unchecked(b"mydata", &[b"tdata"], [i1, i2, i3]); + + let prover = ring_ctx.prover(&pks, prover_idx).unwrap(); + let expected = pair.ring_vrf_sign(&data, &prover); + + let bytes = expected.encode(); + + let expected_len = data.vrf_inputs.len() * PREOUT_SERIALIZED_LEN + + PEDERSEN_SIGNATURE_SERIALIZED_LEN + + RING_PROOF_SERIALIZED_LEN + + 1; + assert_eq!(bytes.len(), expected_len); + + let decoded = RingVrfSignature::decode(&mut bytes.as_slice()).unwrap(); + assert_eq!(expected, decoded); + } + + #[test] + fn encode_decode_ring_vrf_context() { + let ctx1 = RingContext::new_testing(); + let enc1 = ctx1.encode(); + + assert_eq!(enc1.len(), RingContext::max_encoded_len()); + + let ctx2 = RingContext::decode(&mut enc1.as_slice()).unwrap(); + let enc2 = ctx2.encode(); + + assert_eq!(enc1, enc2); + } +} diff --git a/primitives/core/src/bls.rs b/primitives/core/src/bls.rs index 86db4525fb8ae..951aa1828ea51 100644 --- a/primitives/core/src/bls.rs +++ b/primitives/core/src/bls.rs @@ -34,7 +34,7 @@ use w3f_bls::{DoublePublicKey, DoubleSignature, EngineBLS, SerializableToBytes, #[cfg(feature = "full_crypto")] use w3f_bls::{DoublePublicKeyScheme, Keypair, Message, SecretKey}; -use sp_runtime_interface::pass_by::PassByInner; +use sp_runtime_interface::pass_by::{self, PassBy, PassByInner}; use sp_std::{convert::TryFrom, marker::PhantomData, ops::Deref}; /// BLS-377 specialized types @@ -165,6 +165,10 @@ impl PassByInner for Public { } } +impl PassBy for Public { + type PassBy = pass_by::Inner; +} + impl AsRef<[u8; PUBLIC_KEY_SERIALIZED_SIZE]> for Public { fn as_ref(&self) -> &[u8; PUBLIC_KEY_SERIALIZED_SIZE] { &self.inner @@ -332,7 +336,7 @@ impl Serialize for Signature { where S: Serializer, { - serializer.serialize_str(&array_bytes::bytes2hex("", self.as_ref())) 
+ serializer.serialize_str(&array_bytes::bytes2hex("", self)) } } @@ -514,7 +518,6 @@ mod test { use super::*; use crate::crypto::DEV_PHRASE; use bls377::{Pair, Signature}; - use hex_literal::hex; #[test] fn default_phrase_should_be_used() { @@ -529,7 +532,9 @@ mod test { // Only passes if the seed = (seed mod ScalarField) #[test] fn seed_and_derive_should_work() { - let seed = hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f00"); + let seed = array_bytes::hex2array_unchecked( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f00", + ); let pair = Pair::from_seed(&seed); // we are using hash to field so this is not going to work // assert_eq!(pair.seed(), seed); @@ -537,25 +542,27 @@ mod test { let derived = pair.derive(path.into_iter(), None).ok().unwrap().0; assert_eq!( derived.to_raw_vec(), - hex!("a4f2269333b3e87c577aa00c4a2cd650b3b30b2e8c286a47c251279ff3a26e0d") + array_bytes::hex2array_unchecked::<_, 32>( + "a4f2269333b3e87c577aa00c4a2cd650b3b30b2e8c286a47c251279ff3a26e0d" + ) ); } #[test] fn test_vector_should_work() { - let pair = Pair::from_seed(&hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", )); let public = pair.public(); assert_eq!( public, - Public::unchecked_from(hex!( + Public::unchecked_from(array_bytes::hex2array_unchecked( "7a84ca8ce4c37c93c95ecee6a3c0c9a7b9c225093cf2f12dc4f69cbfb847ef9424a18f5755d5a742247d386ff2aabb806bcf160eff31293ea9616976628f77266c8a8cc1d8753be04197bd6cdd8c5c87a148f782c4c1568d599b48833fd539001e580cff64bbc71850605433fcd051f3afc3b74819786f815ffb5272030a8d03e5df61e6183f8fd8ea85f26defa83400" )) ); let message = b""; let signature = - 
hex!("d1e3013161991e142d8751017d4996209c2ff8a9ee160f373733eda3b4b785ba6edce9f45f87104bbe07aa6aa6eb2780aa705efb2c13d3b317d6409d159d23bdc7cdd5c2a832d1551cf49d811d49c901495e527dbd532e3a462335ce2686009104aba7bc11c5b22be78f3198d2727a0b" + array_bytes::hex2array_unchecked("d1e3013161991e142d8751017d4996209c2ff8a9ee160f373733eda3b4b785ba6edce9f45f87104bbe07aa6aa6eb2780aa705efb2c13d3b317d6409d159d23bdc7cdd5c2a832d1551cf49d811d49c901495e527dbd532e3a462335ce2686009104aba7bc11c5b22be78f3198d2727a0b" ); let signature = Signature::unchecked_from(signature); assert!(pair.sign(&message[..]) == signature); @@ -572,13 +579,13 @@ mod test { let public = pair.public(); assert_eq!( public, - Public::unchecked_from(hex!( + Public::unchecked_from(array_bytes::hex2array_unchecked( "6dc6be608fab3c6bd894a606be86db346cc170db85c733853a371f3db54ae1b12052c0888d472760c81b537572a26f00db865e5963aef8634f9917571c51b538b564b2a9ceda938c8b930969ee3b832448e08e33a79e9ddd28af419a3ce45300f5dbc768b067781f44f3fe05a19e6b07b1c4196151ec3f8ea37e4f89a8963030d2101e931276bb9ebe1f20102239d780" )) ); let message = b""; let signature = - hex!("bbb395bbdee1a35930912034f5fde3b36df2835a0536c865501b0675776a1d5931a3bea2e66eff73b2546c6af2061a8019223e4ebbbed661b2538e0f5823f2c708eb89c406beca8fcb53a5c13dbc7c0c42e4cf2be2942bba96ea29297915a06bd2b1b979c0e2ac8fd4ec684a6b5d110c" + array_bytes::hex2array_unchecked("bbb395bbdee1a35930912034f5fde3b36df2835a0536c865501b0675776a1d5931a3bea2e66eff73b2546c6af2061a8019223e4ebbbed661b2538e0f5823f2c708eb89c406beca8fcb53a5c13dbc7c0c42e4cf2be2942bba96ea29297915a06bd2b1b979c0e2ac8fd4ec684a6b5d110c" ); let expected_signature = Signature::unchecked_from(signature); println!("signature is {:?}", pair.sign(&message[..])); @@ -603,12 +610,12 @@ mod test { assert_eq!( public, Public::unchecked_from( - hex!( + array_bytes::hex2array_unchecked( 
"754d2f2bbfa67df54d7e0e951979a18a1e0f45948857752cc2bac6bbb0b1d05e8e48bcc453920bf0c4bbd5993212480112a1fb433f04d74af0a8b700d93dc957ab3207f8d071e948f5aca1a7632c00bdf6d06be05b43e2e6216dccc8a5d55a0071cb2313cfd60b7e9114619cd17c06843b352f0b607a99122f6651df8f02e1ad3697bd208e62af047ddd7b942ba80080") ) ); let message = - hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000" + array_bytes::hex2bytes_unchecked("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000" ); let signature = pair.sign(&message[..]); println!("Correct signature: {:?}", signature); diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 5947603cd0942..6afe4b752a690 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -15,9 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// tag::description[] //! Cryptographic utilities. -// end::description[] use crate::{ed25519, sr25519}; #[cfg(feature = "std")] @@ -486,7 +484,7 @@ pub trait ByteArray: AsRef<[u8]> + AsMut<[u8]> + for<'a> TryFrom<&'a [u8], Error } /// Trait suitable for typical cryptographic key public type. -pub trait Public: ByteArray + Derive + CryptoType + PartialEq + Eq + Clone + Send + Sync {} +pub trait Public: CryptoType + ByteArray + Derive + PartialEq + Eq + Clone + Send + Sync {} /// An opaque 32-byte cryptographic identifier. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo)] @@ -834,7 +832,7 @@ impl sp_std::str::FromStr for SecretUri { /// /// For now it just specifies how to create a key from a phrase and derivation path. 
#[cfg(feature = "full_crypto")] -pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { +pub trait Pair: CryptoType + Sized { /// The type which is used to encode a public key. type Public: Public + Hash; @@ -872,7 +870,7 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { (pair, phrase.to_owned(), seed) } - /// Returns the KeyPair from the English BIP39 seed `phrase`, or `None` if it's invalid. + /// Returns the KeyPair from the English BIP39 seed `phrase`, or an error if it's invalid. #[cfg(feature = "std")] fn from_phrase( phrase: &str, @@ -907,7 +905,7 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { } /// Make a new key pair from secret seed material. The slice must be the correct size or - /// it will return `None`. + /// an error will be returned. /// /// @WARNING: THIS WILL ONLY BE SECURE IF THE `seed` IS SECURE. If it can be guessed /// by an attacker then they can also derive your key. @@ -949,8 +947,6 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { /// Notably, integer junction indices may be legally prefixed with arbitrary number of zeros. /// Similarly an empty password (ending the SURI with `///`) is perfectly valid and will /// generally be equivalent to no password at all. - /// - /// `None` is returned if no matches are found. #[cfg(feature = "std")] fn from_string_with_seed( s: &str, @@ -1146,6 +1142,8 @@ pub mod key_types { pub const ACCOUNT: KeyTypeId = KeyTypeId(*b"acco"); /// Key type for Aura module, built-in. Identified as `aura`. pub const AURA: KeyTypeId = KeyTypeId(*b"aura"); + /// Key type for BEEFY module. + pub const BEEFY: KeyTypeId = KeyTypeId(*b"beef"); /// Key type for ImOnline module, built-in. Identified as `imon`. pub const IM_ONLINE: KeyTypeId = KeyTypeId(*b"imon"); /// Key type for AuthorityDiscovery module, built-in. Identified as `audi`. 
@@ -1158,6 +1156,52 @@ pub mod key_types { pub const DUMMY: KeyTypeId = KeyTypeId(*b"dumy"); } +/// Create random values of `Self` given a stream of entropy. +pub trait FromEntropy: Sized { + /// Create a random value of `Self` given a stream of random bytes on `input`. May only fail if + /// `input` has an error. + fn from_entropy(input: &mut impl codec::Input) -> Result; +} + +impl FromEntropy for bool { + fn from_entropy(input: &mut impl codec::Input) -> Result { + Ok(input.read_byte()? % 2 == 1) + } +} + +macro_rules! impl_from_entropy { + ($type:ty , $( $others:tt )*) => { + impl_from_entropy!($type); + impl_from_entropy!($( $others )*); + }; + ($type:ty) => { + impl FromEntropy for $type { + fn from_entropy(input: &mut impl codec::Input) -> Result { + ::decode(input) + } + } + } +} + +macro_rules! impl_from_entropy_base { + ($type:ty , $( $others:tt )*) => { + impl_from_entropy_base!($type); + impl_from_entropy_base!($( $others )*); + }; + ($type:ty) => { + impl_from_entropy!($type, + [$type; 1], [$type; 2], [$type; 3], [$type; 4], [$type; 5], [$type; 6], [$type; 7], [$type; 8], + [$type; 9], [$type; 10], [$type; 11], [$type; 12], [$type; 13], [$type; 14], [$type; 15], [$type; 16], + [$type; 17], [$type; 18], [$type; 19], [$type; 20], [$type; 21], [$type; 22], [$type; 23], [$type; 24], + [$type; 25], [$type; 26], [$type; 27], [$type; 28], [$type; 29], [$type; 30], [$type; 31], [$type; 32], + [$type; 36], [$type; 40], [$type; 44], [$type; 48], [$type; 56], [$type; 64], [$type; 72], [$type; 80], + [$type; 96], [$type; 112], [$type; 128], [$type; 160], [$type; 192], [$type; 224], [$type; 256] + ); + } +} + +impl_from_entropy_base!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); + #[cfg(test)] mod tests { use super::*; diff --git a/primitives/core/src/defer.rs b/primitives/core/src/defer.rs index c5ff502593692..efa9ee5cebb70 100644 --- a/primitives/core/src/defer.rs +++ b/primitives/core/src/defer.rs @@ -25,6 +25,13 @@ #[must_use] pub struct DeferGuard(pub 
Option); +impl DeferGuard { + /// Creates a new `DeferGuard` with the given closure. + pub fn new(f: F) -> Self { + Self(Some(f)) + } +} + impl Drop for DeferGuard { fn drop(&mut self) { self.0.take().map(|f| f()); diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index 0c997bc290008..05bc679386c3d 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -73,6 +73,14 @@ type Seed = [u8; 32]; )] pub struct Public(pub [u8; 33]); +impl crate::crypto::FromEntropy for Public { + fn from_entropy(input: &mut impl codec::Input) -> Result { + let mut result = Self([0u8; 33]); + input.read(&mut result.0[..])?; + Ok(result) + } +} + impl Public { /// A new instance from the given 33-byte `data`. /// @@ -212,7 +220,7 @@ impl Serialize for Signature { where S: Serializer, { - serializer.serialize_str(&array_bytes::bytes2hex("", self.as_ref())) + serializer.serialize_str(&array_bytes::bytes2hex("", self)) } } @@ -409,7 +417,7 @@ impl TraitPair for Pair { } /// Verify a signature on a message. Returns true if the signature is good. 
- fn verify>(sig: &Self::Signature, message: M, public: &Self::Public) -> bool { + fn verify>(sig: &Signature, message: M, public: &Public) -> bool { sig.recover(message).map(|actual| actual == *public).unwrap_or_default() } @@ -544,7 +552,7 @@ mod test { let derived = pair.derive(path.into_iter(), None).ok().unwrap(); assert_eq!( derived.0.seed(), - array_bytes::hex2array_unchecked::<32>( + array_bytes::hex2array_unchecked::<_, 32>( "b8eefc4937200a8382d00050e050ced2d4ab72cc2ef1b061477afb51564fdd61" ) ); diff --git a/primitives/core/src/ed25519.rs b/primitives/core/src/ed25519.rs index 29da78626f850..151a7229315eb 100644 --- a/primitives/core/src/ed25519.rs +++ b/primitives/core/src/ed25519.rs @@ -31,7 +31,9 @@ use scale_info::TypeInfo; #[cfg(feature = "serde")] use crate::crypto::Ss58Codec; -use crate::crypto::{CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom}; +use crate::crypto::{ + CryptoType, CryptoTypeId, Derive, FromEntropy, Public as TraitPublic, UncheckedFrom, +}; #[cfg(feature = "full_crypto")] use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}; #[cfg(feature = "full_crypto")] @@ -79,6 +81,14 @@ pub struct Pair { secret: SigningKey, } +impl FromEntropy for Public { + fn from_entropy(input: &mut impl codec::Input) -> Result { + let mut result = Self([0u8; 32]); + input.read(&mut result.0[..])?; + Ok(result) + } +} + impl AsRef<[u8; 32]> for Public { fn as_ref(&self) -> &[u8; 32] { &self.0 @@ -224,7 +234,7 @@ impl Serialize for Signature { where S: Serializer, { - serializer.serialize_str(&array_bytes::bytes2hex("", self.as_ref())) + serializer.serialize_str(&array_bytes::bytes2hex("", self)) } } @@ -411,13 +421,9 @@ impl TraitPair for Pair { /// Verify a signature on a message. /// /// Returns true if the signature is good. 
- fn verify>(sig: &Self::Signature, message: M, public: &Self::Public) -> bool { - let Ok(public) = VerificationKey::try_from(public.as_slice()) else { - return false - }; - let Ok(signature) = ed25519_zebra::Signature::try_from(sig.as_ref()) else { - return false - }; + fn verify>(sig: &Signature, message: M, public: &Public) -> bool { + let Ok(public) = VerificationKey::try_from(public.as_slice()) else { return false }; + let Ok(signature) = ed25519_zebra::Signature::try_from(sig.as_ref()) else { return false }; public.verify(&signature, message.as_ref()).is_ok() } @@ -489,7 +495,7 @@ mod test { let derived = pair.derive(path.into_iter(), None).ok().unwrap().0; assert_eq!( derived.seed(), - array_bytes::hex2array_unchecked::<32>( + array_bytes::hex2array_unchecked::<_, 32>( "ede3354e133f9c8e337ddd6ee5415ed4b4ffe5fc7d21e933f4930a3730e5b21c" ) ); diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index b61009bc640ee..3a0e1f33f16c9 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -55,6 +55,8 @@ pub mod crypto; pub mod hexdisplay; pub use paste; +#[cfg(feature = "bandersnatch-experimental")] +pub mod bandersnatch; #[cfg(feature = "bls-experimental")] pub mod bls; pub mod defer; @@ -98,45 +100,6 @@ pub use sp_storage as storage; #[doc(hidden)] pub use sp_std; -/// Context for executing a call into the runtime. -pub enum ExecutionContext { - /// Context used for general block import (including locally authored blocks). - Importing, - /// Context used for importing blocks as part of an initial sync of the blockchain. 
- /// - /// We distinguish between major sync and import so that validators who are running - /// their initial sync (or catching up after some time offline) can use the faster - /// native runtime (since we can reasonably assume the network as a whole has already - /// come to a broad consensus on the block and it probably hasn't been crafted - /// specifically to attack this node), but when importing blocks at the head of the - /// chain in normal operation they can use the safer Wasm version. - Syncing, - /// Context used for block construction. - BlockConstruction, - /// Context used for offchain calls. - /// - /// This allows passing offchain extension and customizing available capabilities. - OffchainCall(Option<(Box, offchain::Capabilities)>), -} - -impl ExecutionContext { - /// Returns the capabilities of particular context. - pub fn capabilities(&self) -> offchain::Capabilities { - use ExecutionContext::*; - - match self { - Importing | Syncing | BlockConstruction => offchain::Capabilities::empty(), - // Enable keystore, transaction pool and Offchain DB reads by default for offchain - // calls. - OffchainCall(None) => - offchain::Capabilities::KEYSTORE | - offchain::Capabilities::OFFCHAIN_DB_READ | - offchain::Capabilities::TRANSACTION_POOL, - OffchainCall(Some((_, capabilities))) => *capabilities, - } - } -} - /// Hex-serialized shim for `Vec`. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize, Hash, PartialOrd, Ord))] @@ -442,6 +405,18 @@ pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB /// /// These feature checking macros can be used to conditionally enable/disable code in a dependent /// crate based on a feature in the crate where the macro is called. 
+/// +/// # Example +///``` +/// sp_core::generate_feature_enabled_macro!(check_std_is_enabled, feature = "std", $); +/// sp_core::generate_feature_enabled_macro!(check_std_or_serde_is_enabled, any(feature = "std", feature = "serde"), $); +/// +/// // All the code passed to the macro will then conditionally compiled based on the features +/// // activated for the crate where the macro was generated. +/// check_std_is_enabled! { +/// struct StdEnabled; +/// } +///``` #[macro_export] // We need to skip formatting this macro because of this bug: // https://github.com/rust-lang/rustfmt/issues/5283 diff --git a/primitives/core/src/offchain/mod.rs b/primitives/core/src/offchain/mod.rs index a6cef85e6ac1b..cef495dfaacdc 100644 --- a/primitives/core/src/offchain/mod.rs +++ b/primitives/core/src/offchain/mod.rs @@ -260,26 +260,22 @@ impl Timestamp { bitflags::bitflags! { /// Execution context extra capabilities. pub struct Capabilities: u32 { - /// Access to transaction pool. - const TRANSACTION_POOL = 0b0000_0000_0001; /// External http calls. - const HTTP = 0b0000_0000_0010; + const HTTP = 1 << 0; /// Keystore access. - const KEYSTORE = 0b0000_0000_0100; + const KEYSTORE = 1 << 2; /// Randomness source. - const RANDOMNESS = 0b0000_0000_1000; + const RANDOMNESS = 1 << 3; /// Access to opaque network state. - const NETWORK_STATE = 0b0000_0001_0000; + const NETWORK_STATE = 1 << 4; /// Access to offchain worker DB (read only). - const OFFCHAIN_DB_READ = 0b0000_0010_0000; + const OFFCHAIN_DB_READ = 1 << 5; /// Access to offchain worker DB (writes). - const OFFCHAIN_DB_WRITE = 0b0000_0100_0000; + const OFFCHAIN_DB_WRITE = 1 << 6; /// Manage the authorized nodes - const NODE_AUTHORIZATION = 0b0000_1000_0000; + const NODE_AUTHORIZATION = 1 << 7; /// Access time related functionality - const TIME = 0b0001_0000_0000; - /// Access the statement store. 
- const STATEMENT_STORE = 0b0010_0000_0000; + const TIME = 1 << 8; } } @@ -785,8 +781,8 @@ mod tests { assert!(!none.contains(Capabilities::KEYSTORE)); assert!(all.contains(Capabilities::KEYSTORE)); assert!(some.contains(Capabilities::KEYSTORE)); - assert!(!none.contains(Capabilities::TRANSACTION_POOL)); - assert!(all.contains(Capabilities::TRANSACTION_POOL)); - assert!(!some.contains(Capabilities::TRANSACTION_POOL)); + assert!(!none.contains(Capabilities::RANDOMNESS)); + assert!(all.contains(Capabilities::RANDOMNESS)); + assert!(!some.contains(Capabilities::TIME)); } } diff --git a/primitives/core/src/offchain/storage.rs b/primitives/core/src/offchain/storage.rs index 3a114de5bfa3c..4db839f1a451a 100644 --- a/primitives/core/src/offchain/storage.rs +++ b/primitives/core/src/offchain/storage.rs @@ -17,12 +17,14 @@ //! In-memory implementation of offchain workers database. -use crate::offchain::OffchainStorage; +use crate::offchain::{DbExternalities, OffchainStorage, StorageKind, STORAGE_PREFIX}; use std::{ collections::hash_map::{Entry, HashMap}, iter::Iterator, }; +const LOG_TARGET: &str = "offchain-worker::storage"; + /// In-memory storage for offchain workers. #[derive(Debug, Clone, Default)] pub struct InMemOffchainStorage { @@ -88,3 +90,95 @@ impl OffchainStorage for InMemOffchainStorage { } } } + +fn unavailable_yet(name: &str) -> R { + tracing::error!( + target: LOG_TARGET, + "The {:?} API is not available for offchain workers yet. Follow \ + https://github.com/paritytech/substrate/issues/1458 for details", + name + ); + Default::default() +} + +const LOCAL_DB: &str = "LOCAL (fork-aware) DB"; + +/// Offchain DB that implements [`DbExternalities`] for [`OffchainStorage`]. +#[derive(Debug, Clone)] +pub struct OffchainDb { + /// Persistent storage database. + persistent: Storage, +} + +impl OffchainDb { + /// Create new instance of Offchain DB. 
+ pub fn new(persistent: Storage) -> Self { + Self { persistent } + } +} + +impl DbExternalities for OffchainDb { + fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { + tracing::debug!( + target: LOG_TARGET, + ?kind, + key = ?array_bytes::bytes2hex("", key), + value = ?array_bytes::bytes2hex("", value), + "Write", + ); + match kind { + StorageKind::PERSISTENT => self.persistent.set(STORAGE_PREFIX, key, value), + StorageKind::LOCAL => unavailable_yet(LOCAL_DB), + } + } + + fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { + tracing::debug!( + target: LOG_TARGET, + ?kind, + key = ?array_bytes::bytes2hex("", key), + "Clear", + ); + match kind { + StorageKind::PERSISTENT => self.persistent.remove(STORAGE_PREFIX, key), + StorageKind::LOCAL => unavailable_yet(LOCAL_DB), + } + } + + fn local_storage_compare_and_set( + &mut self, + kind: StorageKind, + key: &[u8], + old_value: Option<&[u8]>, + new_value: &[u8], + ) -> bool { + tracing::debug!( + target: LOG_TARGET, + ?kind, + key = ?array_bytes::bytes2hex("", key), + new_value = ?array_bytes::bytes2hex("", new_value), + old_value = ?old_value.as_ref().map(|s| array_bytes::bytes2hex("", s)), + "CAS", + ); + match kind { + StorageKind::PERSISTENT => + self.persistent.compare_and_set(STORAGE_PREFIX, key, old_value, new_value), + StorageKind::LOCAL => unavailable_yet(LOCAL_DB), + } + } + + fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { + let result = match kind { + StorageKind::PERSISTENT => self.persistent.get(STORAGE_PREFIX, key), + StorageKind::LOCAL => unavailable_yet(LOCAL_DB), + }; + tracing::debug!( + target: LOG_TARGET, + ?kind, + key = ?array_bytes::bytes2hex("", key), + result = ?result.as_ref().map(|s| array_bytes::bytes2hex("", s)), + "Read", + ); + result + } +} diff --git a/primitives/core/src/sr25519.rs b/primitives/core/src/sr25519.rs index cfcdd6a9fdd41..ffa52ef97d1f5 100644 --- a/primitives/core/src/sr25519.rs +++ 
b/primitives/core/src/sr25519.rs @@ -37,7 +37,10 @@ use schnorrkel::{ use sp_std::vec::Vec; use crate::{ - crypto::{ByteArray, CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom}, + crypto::{ + ByteArray, CryptoType, CryptoTypeId, Derive, FromEntropy, Public as TraitPublic, + UncheckedFrom, + }, hash::{H256, H512}, }; use codec::{Decode, Encode, MaxEncodedLen}; @@ -91,6 +94,14 @@ impl Clone for Pair { } } +impl FromEntropy for Public { + fn from_entropy(input: &mut impl codec::Input) -> Result { + let mut result = Self([0u8; 32]); + input.read(&mut result.0[..])?; + Ok(result) + } +} + impl AsRef<[u8; 32]> for Public { fn as_ref(&self) -> &[u8; 32] { &self.0 @@ -229,7 +240,7 @@ impl Serialize for Signature { where S: Serializer, { - serializer.serialize_str(&array_bytes::bytes2hex("", self.as_ref())) + serializer.serialize_str(&array_bytes::bytes2hex("", self)) } } @@ -493,13 +504,9 @@ impl TraitPair for Pair { self.0.sign(context.bytes(message)).into() } - fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool { - let Ok(signature) = schnorrkel::Signature::from_bytes(sig.as_ref()) else { - return false - }; - let Ok(public) = PublicKey::from_bytes(pubkey.as_ref()) else { - return false - }; + fn verify>(sig: &Signature, message: M, pubkey: &Public) -> bool { + let Ok(signature) = schnorrkel::Signature::from_bytes(sig.as_ref()) else { return false }; + let Ok(public) = PublicKey::from_bytes(pubkey.as_ref()) else { return false }; public.verify_simple(SIGNING_CTX, message.as_ref(), &signature).is_ok() } @@ -561,7 +568,7 @@ pub mod vrf { impl VrfTranscript { /// Build a new transcript instance. /// - /// Each `data` element is a tuple `(domain, message)` composing the transcipt. + /// Each `data` element is a tuple `(domain, message)` used to build the transcript. 
pub fn new(label: &'static [u8], data: &[(&'static [u8], &[u8])]) -> Self { let mut transcript = merlin::Transcript::new(label); data.iter().for_each(|(l, b)| transcript.append_message(l, b)); diff --git a/primitives/core/src/testing.rs b/primitives/core/src/testing.rs index 6faf4ffa3042a..25f5f9012c996 100644 --- a/primitives/core/src/testing.rs +++ b/primitives/core/src/testing.rs @@ -21,10 +21,12 @@ use crate::crypto::KeyTypeId; /// Key type for generic Ed25519 key. pub const ED25519: KeyTypeId = KeyTypeId(*b"ed25"); -/// Key type for generic Sr 25519 key. +/// Key type for generic Sr25519 key. pub const SR25519: KeyTypeId = KeyTypeId(*b"sr25"); /// Key type for generic ECDSA key. pub const ECDSA: KeyTypeId = KeyTypeId(*b"ecds"); +/// Key type for generic Bandersnatch key. +pub const BANDERSNATCH: KeyTypeId = KeyTypeId(*b"band"); /// Key type for generic BLS12-377 key. pub const BLS377: KeyTypeId = KeyTypeId(*b"bls7"); /// Key type for generic BLS12-381 key. diff --git a/primitives/crypto/ec-utils/Cargo.toml b/primitives/crypto/ec-utils/Cargo.toml new file mode 100644 index 0000000000000..9c28c9af9f3bb --- /dev/null +++ b/primitives/crypto/ec-utils/Cargo.toml @@ -0,0 +1,62 @@ +[package] +name = "sp-crypto-ec-utils" +version = "0.4.0" +authors = ["Parity Technologies "] +description = "Host function interface for common elliptic curve operations in Substrate runtimes" +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +ark-serialize = { version = "0.4.2", default-features = false } +ark-ff = { version = "0.4.2", default-features = false } +ark-ec = { version = "0.4.2", default-features = false } +ark-std = { version = "0.4.0", default-features = false } +ark-bls12-377 = { version = "0.4.0", features = ["curve"], default-features = false } +ark-bls12-381 = { version = "0.4.0", features = 
["curve"], default-features = false } +ark-bw6-761 = { version = "0.4.0", default-features = false } +ark-ed-on-bls12-381-bandersnatch = { version = "0.4.0", default-features = false } +ark-ed-on-bls12-377 = { version = "0.4.0", default-features = false } +sp-std = { version = "8.0.0", path = "../../std", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +ark-scale = { version = "0.0.3", features = ["hazmat"], default-features = false } +sp-runtime-interface = { version = "17.0.0", default-features = false, path = "../../runtime-interface" } + +[dev-dependencies] +sp-io = { path = "../../io", default-features = false } +ark-algebra-test-templates = { version = "0.4.2", default-features = false } +sp-ark-models = { version = "0.4.0-beta", default-features = false } +sp-ark-bls12-377 = { version = "0.4.0-beta", default-features = false } +sp-ark-bls12-381 = { version = "0.4.0-beta", default-features = false } +sp-ark-bw6-761 = { version = "0.4.0-beta", default-features = false } +sp-ark-ed-on-bls12-377 = { version = "0.4.0-beta", default-features = false } +sp-ark-ed-on-bls12-381-bandersnatch = { version = "0.4.0-beta", default-features = false } + +[features] +default = [ "std" ] +std = [ + "ark-serialize/std", + "ark-ff/std", + "ark-ec/std", + "ark-std/std", + "ark-bls12-377/std", + "ark-bls12-381/std", + "ark-bw6-761/std", + "ark-ed-on-bls12-381-bandersnatch/std", + "ark-ed-on-bls12-377/std", + "sp-std/std", + "codec/std", + "ark-scale/std", + "sp-runtime-interface/std", + "sp-io/std", + "ark-algebra-test-templates/std", + "sp-ark-bls12-377/std", + "sp-ark-bls12-381/std", + "sp-ark-bw6-761/std", + "sp-ark-ed-on-bls12-377/std", + "sp-ark-ed-on-bls12-381-bandersnatch/std", +] diff --git a/primitives/crypto/ec-utils/src/bls12_377.rs b/primitives/crypto/ec-utils/src/bls12_377.rs new file mode 100644 index 0000000000000..9230479b3bec5 --- /dev/null +++ b/primitives/crypto/ec-utils/src/bls12_377.rs @@ 
-0,0 +1,103 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Support functions for bls12_377 to improve the performance of +//! multi_miller_loop, final_exponentiation, msm's and projective +//! multiplications by host function calls + +use crate::utils::{ + final_exponentiation_generic, msm_sw_generic, mul_projective_generic, multi_miller_loop_generic, +}; +use ark_bls12_377::{g1, g2, Bls12_377}; +use sp_std::vec::Vec; + +/// Compute a multi miller loop through arkworks +pub fn multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + multi_miller_loop_generic::(a, b) +} + +/// Compute a final exponentiation through arkworks +pub fn final_exponentiation(target: Vec) -> Result, ()> { + final_exponentiation_generic::(target) +} + +/// Compute a multi scalar multiplication for short_weierstrass through +/// arkworks on G1. +pub fn msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { + msm_sw_generic::(bases, scalars) +} + +/// Compute a multi scalar multiplication for short_weierstrass through +/// arkworks on G2. +pub fn msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { + msm_sw_generic::(bases, scalars) +} + +/// Compute a projective scalar multiplication for short_weierstrass +/// through arkworks on G1. 
+pub fn mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + mul_projective_generic::(base, scalar) +} + +/// Compute a projective scalar multiplication for short_weierstrass +/// through arkworks on G2. +pub fn mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + mul_projective_generic::(base, scalar) +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_algebra_test_templates::*; + use sp_ark_bls12_377::{ + Bls12_377 as Bls12_377Host, G1Projective as G1ProjectiveHost, + G2Projective as G2ProjectiveHost, HostFunctions, + }; + + #[derive(PartialEq, Eq)] + struct Host; + + impl HostFunctions for Host { + fn bls12_377_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + crate::elliptic_curves::bls12_377_multi_miller_loop(a, b) + } + fn bls12_377_final_exponentiation(f12: Vec) -> Result, ()> { + crate::elliptic_curves::bls12_377_final_exponentiation(f12) + } + fn bls12_377_msm_g1(bases: Vec, bigints: Vec) -> Result, ()> { + crate::elliptic_curves::bls12_377_msm_g1(bases, bigints) + } + fn bls12_377_msm_g2(bases: Vec, bigints: Vec) -> Result, ()> { + crate::elliptic_curves::bls12_377_msm_g2(bases, bigints) + } + fn bls12_377_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + crate::elliptic_curves::bls12_377_mul_projective_g1(base, scalar) + } + fn bls12_377_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + crate::elliptic_curves::bls12_377_mul_projective_g2(base, scalar) + } + } + + type Bls12_377 = Bls12_377Host; + type G1Projective = G1ProjectiveHost; + type G2Projective = G2ProjectiveHost; + + test_group!(g1; G1Projective; sw); + test_group!(g2; G2Projective; sw); + test_group!(pairing_output; ark_ec::pairing::PairingOutput; msm); + test_pairing!(pairing; super::Bls12_377); +} diff --git a/primitives/crypto/ec-utils/src/bls12_381.rs b/primitives/crypto/ec-utils/src/bls12_381.rs new file mode 100644 index 0000000000000..6c707aa581458 --- /dev/null +++ b/primitives/crypto/ec-utils/src/bls12_381.rs @@ -0,0 +1,219 @@ +// This 
file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Support functions for bls12_381 to improve the performance of +//! multi_miller_loop, final_exponentiation, msm's and projective +//! multiplications by host function calls + +use crate::utils::{ + final_exponentiation_generic, msm_sw_generic, mul_projective_generic, multi_miller_loop_generic, +}; +use ark_bls12_381::{g1, g2, Bls12_381}; +use sp_std::vec::Vec; + +/// Compute a multi miller loop through arkworks +pub fn multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + multi_miller_loop_generic::(a, b) +} + +/// Compute a final exponentiation through arkworks +pub fn final_exponentiation(target: Vec) -> Result, ()> { + final_exponentiation_generic::(target) +} + +/// Compute a multi scalar multiplication for short_weierstrass through +/// arkworks on G1. +pub fn msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { + msm_sw_generic::(bases, scalars) +} + +/// Compute a multi scalar multiplication for short_weierstrass through +/// arkworks on G2. +pub fn msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { + msm_sw_generic::(bases, scalars) +} + +/// Compute a projective scalar multiplication for short_weierstrass +/// through arkworks on G1. 
+pub fn mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + mul_projective_generic::(base, scalar) +} + +/// Compute a projective scalar multiplication for short_weierstrass +/// through arkworks on G2. +pub fn mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + mul_projective_generic::(base, scalar) +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_algebra_test_templates::*; + use ark_ec::{AffineRepr, CurveGroup, Group}; + use ark_ff::{fields::Field, One, Zero}; + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate}; + use ark_std::{rand::Rng, test_rng, vec, UniformRand}; + use sp_ark_bls12_381::{ + fq::Fq, fq2::Fq2, fr::Fr, Bls12_381 as Bls12_381Host, G1Affine as G1AffineHost, + G1Projective as G1ProjectiveHost, G2Affine as G2AffineHost, + G2Projective as G2ProjectiveHost, HostFunctions, + }; + use sp_ark_models::pairing::PairingOutput; + + #[derive(PartialEq, Eq)] + struct Host; + + impl HostFunctions for Host { + fn bls12_381_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + crate::elliptic_curves::bls12_381_multi_miller_loop(a, b) + } + fn bls12_381_final_exponentiation(f12: Vec) -> Result, ()> { + crate::elliptic_curves::bls12_381_final_exponentiation(f12) + } + fn bls12_381_msm_g1(bases: Vec, bigints: Vec) -> Result, ()> { + crate::elliptic_curves::bls12_381_msm_g1(bases, bigints) + } + fn bls12_381_msm_g2(bases: Vec, bigints: Vec) -> Result, ()> { + crate::elliptic_curves::bls12_381_msm_g2(bases, bigints) + } + fn bls12_381_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + crate::elliptic_curves::bls12_381_mul_projective_g1(base, scalar) + } + fn bls12_381_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + crate::elliptic_curves::bls12_381_mul_projective_g2(base, scalar) + } + } + + type Bls12_381 = Bls12_381Host; + type G1Projective = G1ProjectiveHost; + type G2Projective = G2ProjectiveHost; + type G1Affine = G1AffineHost; + type G2Affine = G2AffineHost; + + test_group!(g1; 
G1Projective; sw); + test_group!(g2; G2Projective; sw); + test_group!(pairing_output; PairingOutput; msm); + test_pairing!(ark_pairing; super::Bls12_381); + + #[test] + fn test_g1_endomorphism_beta() { + assert!(sp_ark_bls12_381::g1::BETA.pow([3u64]).is_one()); + } + + #[test] + fn test_g1_subgroup_membership_via_endomorphism() { + let mut rng = test_rng(); + let generator = G1Projective::rand(&mut rng).into_affine(); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); + } + + #[test] + fn test_g1_subgroup_non_membership_via_endomorphism() { + let mut rng = test_rng(); + loop { + let x = Fq::rand(&mut rng); + let greatest = rng.gen(); + + if let Some(p) = G1Affine::get_point_from_x_unchecked(x, greatest) { + if !::is_zero(&p.mul_bigint(Fr::characteristic())) { + assert!(!p.is_in_correct_subgroup_assuming_on_curve()); + return + } + } + } + } + + #[test] + fn test_g2_subgroup_membership_via_endomorphism() { + let mut rng = test_rng(); + let generator = G2Projective::rand(&mut rng).into_affine(); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); + } + + #[test] + fn test_g2_subgroup_non_membership_via_endomorphism() { + let mut rng = test_rng(); + loop { + let x = Fq2::rand(&mut rng); + let greatest = rng.gen(); + + if let Some(p) = G2Affine::get_point_from_x_unchecked(x, greatest) { + if !::is_zero(&p.mul_bigint(Fr::characteristic())) { + assert!(!p.is_in_correct_subgroup_assuming_on_curve()); + return + } + } + } + } + + // Test vectors and macro adapted from https://github.com/zkcrypto/bls12_381/blob/e224ad4ea1babfc582ccd751c2bf128611d10936/src/test-data/mod.rs + macro_rules! 
test_vectors { + ($projective:ident, $affine:ident, $compress:expr, $expected:ident) => { + let mut e = $projective::zero(); + + let mut v = vec![]; + { + let mut expected = $expected; + for _ in 0..1000 { + let e_affine = $affine::from(e); + let mut serialized = vec![0u8; e.serialized_size($compress)]; + e_affine.serialize_with_mode(serialized.as_mut_slice(), $compress).unwrap(); + v.extend_from_slice(&serialized[..]); + + let mut decoded = serialized; + let len_of_encoding = decoded.len(); + (&mut decoded[..]).copy_from_slice(&expected[0..len_of_encoding]); + expected = &expected[len_of_encoding..]; + let decoded = + $affine::deserialize_with_mode(&decoded[..], $compress, Validate::Yes) + .unwrap(); + assert_eq!(e_affine, decoded); + + e += &$projective::generator(); + } + } + + assert_eq!(&v[..], $expected); + }; + } + + #[test] + fn g1_compressed_valid_test_vectors() { + let bytes: &'static [u8] = include_bytes!("test-data/g1_compressed_valid_test_vectors.dat"); + test_vectors!(G1Projective, G1Affine, Compress::Yes, bytes); + } + + #[test] + fn g1_uncompressed_valid_test_vectors() { + let bytes: &'static [u8] = + include_bytes!("test-data/g1_uncompressed_valid_test_vectors.dat"); + test_vectors!(G1Projective, G1Affine, Compress::No, bytes); + } + + #[test] + fn g2_compressed_valid_test_vectors() { + let bytes: &'static [u8] = include_bytes!("test-data/g2_compressed_valid_test_vectors.dat"); + test_vectors!(G2Projective, G2Affine, Compress::Yes, bytes); + } + + #[test] + fn g2_uncompressed_valid_test_vectors() { + let bytes: &'static [u8] = + include_bytes!("test-data/g2_uncompressed_valid_test_vectors.dat"); + test_vectors!(G2Projective, G2Affine, Compress::No, bytes); + } +} diff --git a/primitives/crypto/ec-utils/src/bw6_761.rs b/primitives/crypto/ec-utils/src/bw6_761.rs new file mode 100644 index 0000000000000..2f3b4c3c9c9aa --- /dev/null +++ b/primitives/crypto/ec-utils/src/bw6_761.rs @@ -0,0 +1,103 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Support functions for bw6_761 to improve the performance of +//! multi_miller_loop, final_exponentiation, msm's and projective +//! multiplications by host function calls. + +use crate::utils::{ + final_exponentiation_generic, msm_sw_generic, mul_projective_generic, multi_miller_loop_generic, +}; +use ark_bw6_761::{g1, g2, BW6_761}; +use sp_std::vec::Vec; + +/// Compute a multi miller loop through arkworks +pub fn multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + multi_miller_loop_generic::(a, b) +} + +/// Compute a final exponentiation through arkworks +pub fn final_exponentiation(target: Vec) -> Result, ()> { + final_exponentiation_generic::(target) +} + +/// Compute a multi scalar multiplication for short_weierstrass through +/// arkworks on G1. +pub fn msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { + msm_sw_generic::(bases, scalars) +} + +/// Compute a multi scalar multiplication for short_weierstrass through +/// arkworks on G2. +pub fn msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { + msm_sw_generic::(bases, scalars) +} + +/// Compute a projective scalar multiplication for short_weierstrass through +/// arkworks on G1. 
+pub fn mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + mul_projective_generic::(base, scalar) +} + +/// Compute a projective scalar multiplication for short_weierstrass through +/// arkworks on G2. +pub fn mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + mul_projective_generic::(base, scalar) +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_algebra_test_templates::*; + use sp_ark_bw6_761::{ + G1Projective as G1ProjectiveHost, G2Projective as G2ProjectiveHost, HostFunctions, + BW6_761 as BW6_761Host, + }; + + #[derive(PartialEq, Eq)] + struct Host; + + impl HostFunctions for Host { + fn bw6_761_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + crate::elliptic_curves::bw6_761_multi_miller_loop(a, b) + } + fn bw6_761_final_exponentiation(f12: Vec) -> Result, ()> { + crate::elliptic_curves::bw6_761_final_exponentiation(f12) + } + fn bw6_761_msm_g1(bases: Vec, bigints: Vec) -> Result, ()> { + crate::elliptic_curves::bw6_761_msm_g1(bases, bigints) + } + fn bw6_761_msm_g2(bases: Vec, bigints: Vec) -> Result, ()> { + crate::elliptic_curves::bw6_761_msm_g2(bases, bigints) + } + fn bw6_761_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + crate::elliptic_curves::bw6_761_mul_projective_g1(base, scalar) + } + fn bw6_761_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + crate::elliptic_curves::bw6_761_mul_projective_g2(base, scalar) + } + } + + type BW6_761 = BW6_761Host; + type G1Projective = G1ProjectiveHost; + type G2Projective = G2ProjectiveHost; + + test_group!(g1; G1Projective; sw); + test_group!(g2; G2Projective; sw); + test_group!(pairing_output; ark_ec::pairing::PairingOutput; msm); + test_pairing!(pairing; super::BW6_761); +} diff --git a/primitives/crypto/ec-utils/src/ed_on_bls12_377.rs b/primitives/crypto/ec-utils/src/ed_on_bls12_377.rs new file mode 100644 index 0000000000000..84a86286180f7 --- /dev/null +++ b/primitives/crypto/ec-utils/src/ed_on_bls12_377.rs @@ -0,0 +1,56 @@ +// This file is part of 
Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Support functions for ed_on_bls12_377 to improve the performance of +//! msm and projective multiplication by host function calls + +use crate::utils::{msm_te_generic, mul_projective_te_generic}; +use ark_ed_on_bls12_377::EdwardsConfig; +use sp_std::vec::Vec; + +/// Compute a multi scalar mulitplication for twisted_edwards through +/// arkworks. +pub fn msm(bases: Vec, scalars: Vec) -> Result, ()> { + msm_te_generic::(bases, scalars) +} + +/// Compute a projective scalar multiplication for twisted_edwards +/// through arkworks. 
+pub fn mul_projective(base: Vec, scalar: Vec) -> Result, ()> { + mul_projective_te_generic::(base, scalar) +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_algebra_test_templates::*; + use sp_ark_ed_on_bls12_377::{EdwardsProjective as EdwardsProjectiveHost, HostFunctions}; + + struct Host {} + + impl HostFunctions for Host { + fn ed_on_bls12_377_msm(bases: Vec, scalars: Vec) -> Result, ()> { + crate::elliptic_curves::ed_on_bls12_377_msm(bases, scalars) + } + fn ed_on_bls12_377_mul_projective(base: Vec, scalar: Vec) -> Result, ()> { + crate::elliptic_curves::ed_on_bls12_377_mul_projective(base, scalar) + } + } + + type EdwardsProjective = EdwardsProjectiveHost; + test_group!(te; EdwardsProjective; te); +} diff --git a/primitives/crypto/ec-utils/src/ed_on_bls12_381_bandersnatch.rs b/primitives/crypto/ec-utils/src/ed_on_bls12_381_bandersnatch.rs new file mode 100644 index 0000000000000..72b68c3b47182 --- /dev/null +++ b/primitives/crypto/ec-utils/src/ed_on_bls12_381_bandersnatch.rs @@ -0,0 +1,94 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Support functions for ed_on_bls12_381_bandersnatch to improve the +//! performance of msm' and projective multiplications by host function +//! calls. 
+ +use crate::utils::{ + msm_sw_generic, msm_te_generic, mul_projective_generic, mul_projective_te_generic, +}; +use ark_ed_on_bls12_381_bandersnatch::BandersnatchConfig; +use sp_std::vec::Vec; + +/// Compute a multi scalar multiplication for short_weierstrass through +/// arkworks. +pub fn sw_msm(bases: Vec, scalars: Vec) -> Result, ()> { + msm_sw_generic::(bases, scalars) +} + +/// Compute a multi scalar mulitplication for twisted_edwards through +/// arkworks. +pub fn te_msm(bases: Vec, scalars: Vec) -> Result, ()> { + msm_te_generic::(bases, scalars) +} + +/// Compute a projective scalar multiplication for short_weierstrass +/// through arkworks. +pub fn sw_mul_projective(base: Vec, scalar: Vec) -> Result, ()> { + mul_projective_generic::(base, scalar) +} + +/// Compute a projective scalar multiplication for twisted_edwards +/// through arkworks. +pub fn te_mul_projective(base: Vec, scalar: Vec) -> Result, ()> { + mul_projective_te_generic::(base, scalar) +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_algebra_test_templates::*; + use sp_ark_ed_on_bls12_381_bandersnatch::{ + EdwardsProjective as EdwardsProjectiveHost, HostFunctions, SWProjective as SWProjectiveHost, + }; + + pub struct Host {} + + impl HostFunctions for Host { + fn ed_on_bls12_381_bandersnatch_te_msm( + bases: Vec, + scalars: Vec, + ) -> Result, ()> { + crate::elliptic_curves::ed_on_bls12_381_bandersnatch_te_msm(bases, scalars) + } + fn ed_on_bls12_381_bandersnatch_sw_msm( + bases: Vec, + scalars: Vec, + ) -> Result, ()> { + crate::elliptic_curves::ed_on_bls12_381_bandersnatch_sw_msm(bases, scalars) + } + fn ed_on_bls12_381_bandersnatch_te_mul_projective( + base: Vec, + scalar: Vec, + ) -> Result, ()> { + crate::elliptic_curves::ed_on_bls12_381_bandersnatch_te_mul_projective(base, scalar) + } + fn ed_on_bls12_381_bandersnatch_sw_mul_projective( + base: Vec, + scalar: Vec, + ) -> Result, ()> { + crate::elliptic_curves::ed_on_bls12_381_bandersnatch_sw_mul_projective(base, scalar) + } + 
} + + type EdwardsProjective = EdwardsProjectiveHost; + type SWProjective = SWProjectiveHost; + + test_group!(sw; SWProjective; sw); + test_group!(te; EdwardsProjective; te); +} diff --git a/primitives/crypto/ec-utils/src/lib.rs b/primitives/crypto/ec-utils/src/lib.rs new file mode 100644 index 0000000000000..c1877dd5b5d72 --- /dev/null +++ b/primitives/crypto/ec-utils/src/lib.rs @@ -0,0 +1,264 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The main elliptic curves trait, allowing Substrate to call into host functions +//! for operations on elliptic curves. + +#![warn(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod bls12_377; +pub mod bls12_381; +pub mod bw6_761; +pub mod ed_on_bls12_377; +pub mod ed_on_bls12_381_bandersnatch; +mod utils; + +use sp_runtime_interface::runtime_interface; + +/// Interfaces for working with elliptic curves related types from within the runtime. 
+/// All type are (de-)serialized through the wrapper types from the ark-scale trait, +/// with ark_scale::{ArkScale, ArkScaleProjective}; +#[runtime_interface] +pub trait EllipticCurves { + /// Compute a multi Miller loop for bls12_37 + /// Receives encoded: + /// a: ArkScale>> + /// b: ArkScale>> + /// Returns encoded: ArkScale>> + fn bls12_377_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + bls12_377::multi_miller_loop(a, b) + } + + /// Compute a final exponentiation for bls12_377 + /// Receives encoded: ArkScale>> + /// Returns encoded: ArkScale>> + fn bls12_377_final_exponentiation(f12: Vec) -> Result, ()> { + bls12_377::final_exponentiation(f12) + } + + /// Compute a projective multiplication on G1 for bls12_377 + /// Receives encoded: + /// base: ArkScaleProjective + /// scalar: ArkScale<&[u64]> + /// Returns encoded: ArkScaleProjective + fn bls12_377_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + bls12_377::mul_projective_g1(base, scalar) + } + + /// Compute a projective multiplication on G2 for bls12_377 + /// through arkworks on G2 + /// Receives encoded: + /// base: ArkScaleProjective + /// scalar: ArkScale<&[u64]> + /// Returns encoded: ArkScaleProjective + fn bls12_377_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + bls12_377::mul_projective_g2(base, scalar) + } + + /// Compute a msm on G1 for bls12_377 + /// Receives encoded: + /// bases: ArkScale<&[ark_bls12_377::G1Affine]> + /// scalars: ArkScale<&[ark_bls12_377::Fr]> + /// Returns encoded: ArkScaleProjective + fn bls12_377_msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { + bls12_377::msm_g1(bases, scalars) + } + + /// Compute a msm on G2 for bls12_377 + /// Receives encoded: + /// bases: ArkScale<&[ark_bls12_377::G2Affine]> + /// scalars: ArkScale<&[ark_bls12_377::Fr]> + /// Returns encoded: ArkScaleProjective + fn bls12_377_msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { + bls12_377::msm_g2(bases, scalars) + } + + /// Compute a multi Miller loop on bls12_381 + 
/// Receives encoded: + /// a: ArkScale>> + /// b: ArkScale>> + /// Returns encoded: ArkScale>> + fn bls12_381_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + bls12_381::multi_miller_loop(a, b) + } + + /// Compute a final exponentiation on bls12_381 + /// Receives encoded: ArkScale>> + /// Returns encoded:ArkScale>> + fn bls12_381_final_exponentiation(f12: Vec) -> Result, ()> { + bls12_381::final_exponentiation(f12) + } + + /// Compute a projective multiplication on G1 for bls12_381 + /// Receives encoded: + /// base: ArkScaleProjective + /// scalar: ArkScale<&[u64]> + /// Returns encoded: ArkScaleProjective + fn bls12_381_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + bls12_381::mul_projective_g1(base, scalar) + } + + /// Compute a projective multiplication on G2 for bls12_381 + /// Receives encoded: + /// base: ArkScaleProjective + /// scalar: ArkScale<&[u64]> + /// Returns encoded: ArkScaleProjective + fn bls12_381_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + bls12_381::mul_projective_g2(base, scalar) + } + + /// Compute a msm on G1 for bls12_381 + /// Receives encoded: + /// bases: ArkScale<&[ark_bls12_381::G1Affine]> + /// scalars: ArkScale<&[ark_bls12_381::Fr]> + /// Returns encoded: ArkScaleProjective + fn bls12_381_msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { + bls12_381::msm_g1(bases, scalars) + } + + /// Compute a msm on G2 for bls12_381 + /// Receives encoded: + /// bases: ArkScale<&[ark_bls12_381::G2Affine]> + /// scalars: ArkScale<&[ark_bls12_381::Fr]> + /// Returns encoded: ArkScaleProjective + fn bls12_381_msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { + bls12_381::msm_g2(bases, scalars) + } + + /// Compute a multi Miller loop on bw6_761 + /// Receives encoded: + /// a: ArkScale>> + /// b: ArkScale>> + /// Returns encoded: ArkScale>> + fn bw6_761_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + bw6_761::multi_miller_loop(a, b) + } + + /// Compute a final exponentiation on bw6_761 + /// Receives encoded: 
ArkScale>> + /// Returns encoded: ArkScale>> + fn bw6_761_final_exponentiation(f12: Vec) -> Result, ()> { + bw6_761::final_exponentiation(f12) + } + + /// Compute a projective multiplication on G1 for bw6_761 + /// Receives encoded: + /// base: ArkScaleProjective + /// scalar: ArkScale<&[u64]> + /// Returns encoded: ArkScaleProjective + fn bw6_761_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + bw6_761::mul_projective_g1(base, scalar) + } + + /// Compute a projective multiplication on G2 for bw6_761 + /// Receives encoded: + /// base: ArkScaleProjective + /// scalar: ArkScale<&[u64]> + /// Returns encoded: ArkScaleProjective + fn bw6_761_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + bw6_761::mul_projective_g2(base, scalar) + } + + /// Compute a msm on G1 for bw6_761 + /// Receives encoded: + /// bases: ArkScale<&[ark_bw6_761::G1Affine]> + /// scalars: ArkScale<&[ark_bw6_761::Fr]> + /// Returns encoded: ArkScaleProjective + fn bw6_761_msm_g1(bases: Vec, bigints: Vec) -> Result, ()> { + bw6_761::msm_g1(bases, bigints) + } + + /// Compute a msm on G2 for bw6_761 + /// Receives encoded: + /// bases: ArkScale<&[ark_bw6_761::G2Affine]> + /// scalars: ArkScale<&[ark_bw6_761::Fr]> + /// Returns encoded: ArkScaleProjective + fn bw6_761_msm_g2(bases: Vec, bigints: Vec) -> Result, ()> { + bw6_761::msm_g2(bases, bigints) + } + + /// Compute projective multiplication on ed_on_bls12_377 + /// Receives encoded: + /// base: ArkScaleProjective + /// scalar: ArkScale<&[u64]> + /// Returns encoded: ArkScaleProjective + fn ed_on_bls12_377_mul_projective(base: Vec, scalar: Vec) -> Result, ()> { + ed_on_bls12_377::mul_projective(base, scalar) + } + + /// Compute msm on ed_on_bls12_377 + /// Receives encoded: + /// bases: ArkScale<&[ark_ed_on_bls12_377::EdwardsAffine]> + /// scalars: + /// ArkScale<&[ark_ed_on_bls12_377::Fr]> + /// Returns encoded: + /// ArkScaleProjective + fn ed_on_bls12_377_msm(bases: Vec, scalars: Vec) -> Result, ()> { + 
ed_on_bls12_377::msm(bases, scalars) + } + + /// Compute short weierstrass projective multiplication on ed_on_bls12_381_bandersnatch + /// Receives encoded: + /// base: ArkScaleProjective + /// scalar: ArkScale<&[u64]> + /// Returns encoded: ArkScaleProjective + fn ed_on_bls12_381_bandersnatch_sw_mul_projective( + base: Vec, + scalar: Vec, + ) -> Result, ()> { + ed_on_bls12_381_bandersnatch::sw_mul_projective(base, scalar) + } + + /// Compute twisted edwards projective multiplication on ed_on_bls12_381_bandersnatch + /// Receives encoded: + /// base: ArkScaleProjective + /// scalar: ArkScale<&[u64]> + /// Returns encoded: ArkScaleProjective + fn ed_on_bls12_381_bandersnatch_te_mul_projective( + base: Vec, + scalar: Vec, + ) -> Result, ()> { + ed_on_bls12_381_bandersnatch::te_mul_projective(base, scalar) + } + + /// Compute short weierstrass msm on ed_on_bls12_381_bandersnatch + /// Receives encoded: + /// bases: ArkScale<&[ark_ed_on_bls12_381_bandersnatch::SWAffine]> + /// scalars: ArkScale<&[ark_ed_on_bls12_381_bandersnatch::Fr]> + /// Returns encoded: + /// ArkScaleProjective + fn ed_on_bls12_381_bandersnatch_sw_msm( + bases: Vec, + scalars: Vec, + ) -> Result, ()> { + ed_on_bls12_381_bandersnatch::sw_msm(bases, scalars) + } + + /// Compute twisted edwards msm on ed_on_bls12_381_bandersnatch + /// Receives encoded: + /// base: ArkScaleProjective + /// scalars: ArkScale<&[ark_ed_on_bls12_381_bandersnatch::Fr]> + /// Returns encoded: + /// ArkScaleProjective + fn ed_on_bls12_381_bandersnatch_te_msm( + bases: Vec, + scalars: Vec, + ) -> Result, ()> { + ed_on_bls12_381_bandersnatch::te_msm(bases, scalars) + } +} diff --git a/primitives/crypto/ec-utils/src/test-data/g1_compressed_valid_test_vectors.dat b/primitives/crypto/ec-utils/src/test-data/g1_compressed_valid_test_vectors.dat new file mode 100644 index 0000000000000..ea8cd67652d13 Binary files /dev/null and b/primitives/crypto/ec-utils/src/test-data/g1_compressed_valid_test_vectors.dat differ diff --git 
a/primitives/crypto/ec-utils/src/test-data/g1_uncompressed_valid_test_vectors.dat b/primitives/crypto/ec-utils/src/test-data/g1_uncompressed_valid_test_vectors.dat new file mode 100644 index 0000000000000..86abfba945c7b Binary files /dev/null and b/primitives/crypto/ec-utils/src/test-data/g1_uncompressed_valid_test_vectors.dat differ diff --git a/primitives/crypto/ec-utils/src/test-data/g2_compressed_valid_test_vectors.dat b/primitives/crypto/ec-utils/src/test-data/g2_compressed_valid_test_vectors.dat new file mode 100644 index 0000000000000..a40bbe251d90e Binary files /dev/null and b/primitives/crypto/ec-utils/src/test-data/g2_compressed_valid_test_vectors.dat differ diff --git a/primitives/crypto/ec-utils/src/test-data/g2_uncompressed_valid_test_vectors.dat b/primitives/crypto/ec-utils/src/test-data/g2_uncompressed_valid_test_vectors.dat new file mode 100644 index 0000000000000..92e4bc528e893 Binary files /dev/null and b/primitives/crypto/ec-utils/src/test-data/g2_uncompressed_valid_test_vectors.dat differ diff --git a/primitives/crypto/ec-utils/src/utils.rs b/primitives/crypto/ec-utils/src/utils.rs new file mode 100644 index 0000000000000..5560d59211605 --- /dev/null +++ b/primitives/crypto/ec-utils/src/utils.rs @@ -0,0 +1,130 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
The generic executions of the operations on arkworks elliptic curves +//! which get instantiatied by the corresponding curves. +use ark_ec::{ + pairing::{MillerLoopOutput, Pairing, PairingOutput}, + short_weierstrass, + short_weierstrass::SWCurveConfig, + twisted_edwards, + twisted_edwards::TECurveConfig, + CurveConfig, VariableBaseMSM, +}; +use ark_scale::hazmat::ArkScaleProjective; +use ark_std::vec::Vec; +use codec::{Decode, Encode}; + +const HOST_CALL: ark_scale::Usage = ark_scale::HOST_CALL; +type ArkScale = ark_scale::ArkScale; + +pub(crate) fn multi_miller_loop_generic( + g1: Vec, + g2: Vec, +) -> Result, ()> { + let g1 = ::G1Affine>> as Decode>::decode(&mut g1.as_slice()) + .map_err(|_| ())?; + let g2 = ::G2Affine>> as Decode>::decode(&mut g2.as_slice()) + .map_err(|_| ())?; + + let result = Curve::multi_miller_loop(g1.0, g2.0).0; + + let result: ArkScale<::TargetField> = result.into(); + Ok(result.encode()) +} + +pub(crate) fn final_exponentiation_generic(target: Vec) -> Result, ()> { + let target = + ::TargetField> as Decode>::decode(&mut target.as_slice()) + .map_err(|_| ())?; + + let result = Curve::final_exponentiation(MillerLoopOutput(target.0)).ok_or(())?; + + let result: ArkScale> = result.into(); + Ok(result.encode()) +} + +pub(crate) fn msm_sw_generic( + bases: Vec, + scalars: Vec, +) -> Result, ()> { + let bases = + >> as Decode>::decode(&mut bases.as_slice()) + .map_err(|_| ())?; + let scalars = ::ScalarField>> as Decode>::decode( + &mut scalars.as_slice(), + ) + .map_err(|_| ())?; + + let result = + as VariableBaseMSM>::msm(&bases.0, &scalars.0) + .map_err(|_| ())?; + + let result: ArkScaleProjective> = result.into(); + Ok(result.encode()) +} + +pub(crate) fn msm_te_generic( + bases: Vec, + scalars: Vec, +) -> Result, ()> { + let bases = + >> as Decode>::decode(&mut bases.as_slice()) + .map_err(|_| ())?; + let scalars = ::ScalarField>> as Decode>::decode( + &mut scalars.as_slice(), + ) + .map_err(|_| ())?; + + let result = as 
VariableBaseMSM>::msm(&bases.0, &scalars.0) + .map_err(|_| ())?; + + let result: ArkScaleProjective> = result.into(); + Ok(result.encode()) +} + +pub(crate) fn mul_projective_generic( + base: Vec, + scalar: Vec, +) -> Result, ()> { + let base = > as Decode>::decode( + &mut base.as_slice(), + ) + .map_err(|_| ())?; + let scalar = > as Decode>::decode(&mut scalar.as_slice()).map_err(|_| ())?; + + let result = ::mul_projective(&base.0, &scalar.0); + + let result: ArkScaleProjective> = result.into(); + Ok(result.encode()) +} + +pub(crate) fn mul_projective_te_generic( + base: Vec, + scalar: Vec, +) -> Result, ()> { + let base = > as Decode>::decode( + &mut base.as_slice(), + ) + .map_err(|_| ())?; + let scalar = > as Decode>::decode(&mut scalar.as_slice()).map_err(|_| ())?; + + let result = ::mul_projective(&base.0, &scalar.0); + + let result: ArkScaleProjective> = result.into(); + Ok(result.encode()) +} diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml index 183d7bd5a7720..bbac79a846504 100644 --- a/primitives/debug-derive/Cargo.toml +++ b/primitives/debug-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-debug-derive" -version = "5.0.0" +version = "8.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = "1.0.26" -syn = "2.0.14" +quote = "1.0.28" +syn = "2.0.16" proc-macro2 = "1.0.56" [features] diff --git a/primitives/externalities/Cargo.toml b/primitives/externalities/Cargo.toml index 0777111d88c22..71dbd122bebec 100644 --- a/primitives/externalities/Cargo.toml +++ b/primitives/externalities/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-externalities" -version = "0.13.0" +version = "0.19.0" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2021" @@ -14,10 +14,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", 
version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } environmental = { version = "1.1.3", default-features = false } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } -sp-storage = { version = "7.0.0", default-features = false, path = "../storage" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } +sp-storage = { version = "13.0.0", default-features = false, path = "../storage" } [features] default = ["std"] diff --git a/primitives/externalities/src/extensions.rs b/primitives/externalities/src/extensions.rs index 84155227a713e..8b0bbd2c5921b 100644 --- a/primitives/externalities/src/extensions.rs +++ b/primitives/externalities/src/extensions.rs @@ -42,6 +42,12 @@ pub trait Extension: Send + Any { fn as_mut_any(&mut self) -> &mut dyn Any; } +impl Extension for Box { + fn as_mut_any(&mut self) -> &mut dyn Any { + (**self).as_mut_any() + } +} + /// Macro for declaring an extension that usable with [`Extensions`]. /// /// The extension will be an unit wrapper struct that implements [`Extension`], `Deref` and @@ -190,6 +196,14 @@ impl Extensions { pub fn iter_mut(&mut self) -> impl Iterator)> { self.extensions.iter_mut() } + + /// Merge `other` into `self`. + /// + /// If both contain the same extension, the extension instance of `other` will overwrite the + /// instance found in `self`. 
+ pub fn merge(&mut self, other: Self) { + self.extensions.extend(other.extensions); + } } impl Extend for Extensions { diff --git a/primitives/genesis-builder/Cargo.toml b/primitives/genesis-builder/Cargo.toml new file mode 100644 index 0000000000000..d17c6b5da8024 --- /dev/null +++ b/primitives/genesis-builder/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "sp-genesis-builder" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate GenesisConfig builder API" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +serde_json = { version = "1.0.85", default-features = false, features = ["alloc"] } + +[features] +default = [ "std" ] +std = [ + "sp-api/std", + "sp-std/std", + "serde_json/std", + "sp-runtime/std" +] diff --git a/primitives/genesis-builder/README.md b/primitives/genesis-builder/README.md new file mode 100644 index 0000000000000..4a842c95e358e --- /dev/null +++ b/primitives/genesis-builder/README.md @@ -0,0 +1,5 @@ +Substrate genesis builder. + +Refer to the module doc for more details. + +License: Apache-2.0 diff --git a/primitives/genesis-builder/src/lib.rs b/primitives/genesis-builder/src/lib.rs new file mode 100644 index 0000000000000..e002cd3aa6f70 --- /dev/null +++ b/primitives/genesis-builder/src/lib.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +//! Substrate genesis config builder +//! +//! This Runtime API allows to construct `GenesisConfig`, in particular: +//! - serialize the runtime default `GenesisConfig` struct into json format, +//! - put the GenesisConfig struct into the storage. Internally this operation calls +//! `GenesisBuild::build` function for all runtime pallets, which is typically provided by +//! pallet's author. +//! - deserialize the `GenesisConfig` from given json blob and put `GenesisConfig` into the state +//! storage. Allows to build customized configuration. +//! +//! Providing externalities with empty storage and putting `GenesisConfig` into storage allows to +//! catch and build the raw storage of `GenesisConfig` which is the foundation for genesis block. + +/// The result type alias, used in build methods. `Err` contains formatted error message. +pub type Result = core::result::Result<(), sp_runtime::RuntimeString>; + +sp_api::decl_runtime_apis! { + /// API to interact with GenesisConfig for the runtime + pub trait GenesisBuilder { + /// Creates the default `GenesisConfig` and returns it as a JSON blob. + /// + /// This function instantiates the default `GenesisConfig` struct for the runtime and serializes it into a JSON + /// blob. It returns a `Vec` containing the JSON representation of the default `GenesisConfig`. 
+ fn create_default_config() -> sp_std::vec::Vec; + + /// Build `GenesisConfig` from a JSON blob not using any defaults and store it in the storage. + /// + /// This function deserializes the full `GenesisConfig` from the given JSON blob and puts it into the storage. + /// If the provided JSON blob is incorrect or incomplete or the deserialization fails, an error is returned. + /// It is recommended to log any errors encountered during the process. + /// + /// Please note that provided json blob must contain all `GenesisConfig` fields, no defaults will be used. + fn build_config(json: sp_std::vec::Vec) -> Result; + } +} diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index 2f5616770d681..d42588ad21a36 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -15,13 +15,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.57", optional = true } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" thiserror = { version = "1.0.30", optional = true } -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-runtime = { version = "7.0.0", optional = true, default-features = false, path = "../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +sp-runtime = { version = "24.0.0", optional = true, default-features = false, path = "../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } [dev-dependencies] futures = "0.3.21" @@ -32,7 +31,6 @@ std = [ "async-trait", "codec/std", "scale-info/std", - "sp-core/std", "sp-runtime/std", "sp-std/std", "thiserror", diff --git a/primitives/io/Cargo.toml 
b/primitives/io/Cargo.toml index c6e716396aea4..8590778714121 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-io" -version = "7.0.0" +version = "23.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -17,18 +17,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { version = "1.1.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["bytes"] } -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-keystore = { version = "0.13.0", default-features = false, optional = true, path = "../keystore" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bytes"] } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +sp-keystore = { version = "0.27.0", default-features = false, optional = true, path = "../keystore" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } libsecp256k1 = { version = "0.7", optional = true } -sp-state-machine = { version = "0.13.0", default-features = false, optional = true, path = "../state-machine" } -sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../runtime-interface" } -sp-trie = { version = "7.0.0", default-features = false, optional = true, path = "../trie" } -sp-externalities = { version = "0.13.0", default-features = false, path = "../externalities" } -sp-tracing = { version = "6.0.0", default-features = false, path = "../tracing" } +sp-state-machine = { version = "0.28.0", default-features = false, optional = true, path = "../state-machine" } +sp-runtime-interface = { version = "17.0.0", default-features = false, path = "../runtime-interface" } +sp-trie = { version = "22.0.0", default-features = false, optional = true, path = "../trie" 
} +sp-externalities = { version = "0.19.0", default-features = false, path = "../externalities" } +sp-tracing = { version = "10.0.0", default-features = false, path = "../tracing" } log = { version = "0.4.17", optional = true } -futures = { version = "0.3.21", features = ["thread-pool"], optional = true } secp256k1 = { version = "0.24.0", features = ["recovery", "global-context"], optional = true } tracing = { version = "0.1.29", default-features = false } tracing-core = { version = "0.1.28", default-features = false} @@ -47,7 +46,6 @@ std = [ "bytes/std", "sp-externalities/std", "sp-core/std", - "sp-keystore", "codec/std", "sp-std/std", "sp-trie/std", @@ -59,9 +57,9 @@ std = [ "tracing/std", "tracing-core/std", "log", - "futures", "ed25519-dalek", "ed25519", + "sp-keystore/std" ] with-tracing = [ @@ -94,3 +92,17 @@ disable_allocator = [] # host function to be supported by the host. Do *not* enable it for your # runtime without first upgrading your host client! improved_panic_error_reporting = [] + +# This feature adds BLS crypto primitives. +# It should not be used in production since the implementation and interface may still +# be subject to significant changes. +bls-experimental = [ + "sp-keystore/bls-experimental", +] + +# This feature adds Bandersnatch crypto primitives. +# It should not be used in production since the implementation and interface may still +# be subject to significant changes. +bandersnatch-experimental = [ + "sp-keystore/bandersnatch-experimental", +] diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 750b5d5924637..0bc434a2a59fc 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -15,19 +15,67 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! I/O host interface for substrate runtime. +//! # Substrate Primitives: IO +//! +//! This crate contains interfaces for the runtime to communicate with the outside world, ergo `io`. +//! 
In other context, such interfaces are referred to as "**host functions**". +//! +//! Each set of host functions are defined with an instance of the +//! [`sp_runtime_interface::runtime_interface`] macro. +//! +//! Most notably, this crate contains host functions for: +//! +//! - [`hashing`] +//! - [`crypto`] +//! - [`trie`] +//! - [`offchain`] +//! - [`storage`] +//! - [`allocator`] +//! - [`logging`] +//! +//! All of the default host functions provided by this crate, and by default contained in all +//! substrate-based clients are amalgamated in [`SubstrateHostFunctions`]. +//! +//! ## Externalities +//! +//! Host functions go hand in hand with the concept of externalities. Externalities are an +//! environment in which host functions are provided, and thus can be accessed. Some host functions +//! are only accessible in an externality environment that provides it. +//! +//! A typical error for substrate developers is the following: +//! +//! ```should_panic +//! use sp_io::storage::get; +//! # fn main() { +//! let data = get(b"hello world"); +//! # } +//! ``` +//! +//! This code will panic with the following error: +//! +//! ```no_compile +//! thread 'main' panicked at '`get_version_1` called outside of an Externalities-provided environment.' +//! ``` +//! +//! Such error messages should always be interpreted as "code accessing host functions accessed +//! outside of externalities". +//! +//! An externality is any type that implements [`sp_externalities::Externalities`]. A simple example +//! of which is [`TestExternalities`], which is commonly used in tests and is exported from this +//! crate. +//! +//! ``` +//! use sp_io::{storage::get, TestExternalities}; +//! # fn main() { +//! TestExternalities::default().execute_with(|| { +//! let data = get(b"hello world"); +//! }); +//! # } +//! 
``` #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(enable_alloc_error_handler, feature(alloc_error_handler))] -#![cfg_attr( - feature = "std", - doc = "Substrate runtime standard library as compiled when linked with Rust's standard library." -)] -#![cfg_attr( - not(feature = "std"), - doc = "Substrate's runtime standard library as compiled without Rust's standard library." -)] use sp_std::vec::Vec; @@ -44,6 +92,8 @@ use sp_core::{ #[cfg(feature = "std")] use sp_keystore::KeystoreExt; +#[cfg(feature = "bandersnatch-experimental")] +use sp_core::bandersnatch; use sp_core::{ crypto::KeyTypeId, ecdsa, ed25519, @@ -55,6 +105,9 @@ use sp_core::{ LogLevel, LogLevelFilter, OpaquePeerId, H256, }; +#[cfg(feature = "bls-experimental")] +use sp_core::bls377; + #[cfg(feature = "std")] use sp_trie::{LayoutV0, LayoutV1, TrieConfiguration}; @@ -776,9 +829,7 @@ pub trait Crypto { return false }; - let Ok(sig) = ed25519_dalek::Signature::from_bytes(&sig.0) else { - return false - }; + let Ok(sig) = ed25519_dalek::Signature::from_bytes(&sig.0) else { return false }; public_key.verify(msg, &sig).is_ok() } else { @@ -1140,6 +1191,40 @@ pub trait Crypto { .map_err(|_| EcdsaVerifyError::BadSignature)?; Ok(pubkey.serialize()) } + + /// Generate an `bls12-377` key for the given key type using an optional `seed` and + /// store it in the keystore. + /// + /// The `seed` needs to be a valid utf8. + /// + /// Returns the public key. + #[cfg(feature = "bls-experimental")] + fn bls377_generate(&mut self, id: KeyTypeId, seed: Option>) -> bls377::Public { + let seed = seed.as_ref().map(|s| std::str::from_utf8(s).expect("Seed is valid utf8!")); + self.extension::() + .expect("No `keystore` associated for the current context!") + .bls377_generate_new(id, seed) + .expect("`bls377_generate` failed") + } + + /// Generate a `bandersnatch` key pair for the given key type using an optional + /// `seed` and store it in the keystore. 
+ /// + /// The `seed` needs to be a valid utf8. + /// + /// Returns the public key. + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_generate( + &mut self, + id: KeyTypeId, + seed: Option>, + ) -> bandersnatch::Public { + let seed = seed.as_ref().map(|s| std::str::from_utf8(s).expect("Seed is valid utf8!")); + self.extension::() + .expect("No `keystore` associated for the current context!") + .bandersnatch_generate_new(id, seed) + .expect("`bandersnatch_generate` failed") + } } /// Interface that provides functions for hashing with different algorithms. diff --git a/primitives/keyring/Cargo.toml b/primitives/keyring/Cargo.toml index db3a8de2b2433..8e9f793636066 100644 --- a/primitives/keyring/Cargo.toml +++ b/primitives/keyring/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-keyring" -version = "7.0.0" +version = "24.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -16,5 +16,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] lazy_static = "1.4.0" strum = { version = "0.24.1", features = ["derive"], default-features = false } -sp-core = { version = "7.0.0", path = "../core" } -sp-runtime = { version = "7.0.0", path = "../runtime" } +sp-core = { version = "21.0.0", path = "../core" } +sp-runtime = { version = "24.0.0", path = "../runtime" } + +[features] +# This feature adds Bandersnatch crypto primitives. +# It should not be used in production since the implementation and interface may still +# be subject to significant changes. +bandersnatch-experimental = ["sp-core/bandersnatch-experimental"] diff --git a/primitives/keyring/src/bandersnatch.rs b/primitives/keyring/src/bandersnatch.rs new file mode 100644 index 0000000000000..8de6786a6fbf6 --- /dev/null +++ b/primitives/keyring/src/bandersnatch.rs @@ -0,0 +1,209 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A set of well-known keys used for testing. + +pub use sp_core::bandersnatch; +use sp_core::{ + bandersnatch::{Pair, Public, Signature}, + crypto::UncheckedFrom, + ByteArray, Pair as PairT, +}; + +use lazy_static::lazy_static; +use std::{collections::HashMap, ops::Deref, sync::Mutex}; + +/// Set of test accounts. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] +pub enum Keyring { + Alice, + Bob, + Charlie, + Dave, + Eve, + Ferdie, + One, + Two, +} + +const PUBLIC_RAW_LEN: usize = ::LEN; + +impl Keyring { + pub fn from_public(who: &Public) -> Option { + Self::iter().find(|&k| &Public::from(k) == who) + } + + pub fn from_raw_public(who: [u8; PUBLIC_RAW_LEN]) -> Option { + Self::from_public(&Public::unchecked_from(who)) + } + + pub fn to_raw_public(self) -> [u8; PUBLIC_RAW_LEN] { + *Public::from(self).as_ref() + } + + pub fn to_raw_public_vec(self) -> Vec { + Public::from(self).to_raw_vec() + } + + pub fn sign(self, msg: &[u8]) -> Signature { + Pair::from(self).sign(msg) + } + + pub fn pair(self) -> Pair { + Pair::from_string(&format!("//{}", <&'static str>::from(self)), None) + .expect("static values are known good; qed") + } + + /// Returns an iterator over all test accounts. 
+ pub fn iter() -> impl Iterator { + ::iter() + } + + pub fn public(self) -> Public { + self.pair().public() + } + + pub fn to_seed(self) -> String { + format!("//{}", self) + } + + /// Create a crypto `Pair` from a numeric value. + pub fn numeric(idx: usize) -> Pair { + Pair::from_string(&format!("//{}", idx), None).expect("numeric values are known good; qed") + } +} + +impl From for &'static str { + fn from(k: Keyring) -> Self { + match k { + Keyring::Alice => "Alice", + Keyring::Bob => "Bob", + Keyring::Charlie => "Charlie", + Keyring::Dave => "Dave", + Keyring::Eve => "Eve", + Keyring::Ferdie => "Ferdie", + Keyring::One => "One", + Keyring::Two => "Two", + } + } +} + +#[derive(Debug)] +pub struct ParseKeyringError; + +impl std::fmt::Display for ParseKeyringError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "ParseKeyringError") + } +} + +impl std::str::FromStr for Keyring { + type Err = ParseKeyringError; + + fn from_str(s: &str) -> Result::Err> { + match s { + "Alice" => Ok(Keyring::Alice), + "Bob" => Ok(Keyring::Bob), + "Charlie" => Ok(Keyring::Charlie), + "Dave" => Ok(Keyring::Dave), + "Eve" => Ok(Keyring::Eve), + "Ferdie" => Ok(Keyring::Ferdie), + "One" => Ok(Keyring::One), + "Two" => Ok(Keyring::Two), + _ => Err(ParseKeyringError), + } + } +} + +lazy_static! 
{ + static ref PRIVATE_KEYS: Mutex> = + Mutex::new(Keyring::iter().map(|who| (who, who.pair())).collect()); + static ref PUBLIC_KEYS: HashMap = PRIVATE_KEYS + .lock() + .unwrap() + .iter() + .map(|(&who, pair)| (who, pair.public())) + .collect(); +} + +impl From for Public { + fn from(k: Keyring) -> Self { + *(*PUBLIC_KEYS).get(&k).unwrap() + } +} + +impl From for Pair { + fn from(k: Keyring) -> Self { + k.pair() + } +} + +impl From for [u8; PUBLIC_RAW_LEN] { + fn from(k: Keyring) -> Self { + *(*PUBLIC_KEYS).get(&k).unwrap().as_ref() + } +} + +impl From for &'static [u8; PUBLIC_RAW_LEN] { + fn from(k: Keyring) -> Self { + PUBLIC_KEYS.get(&k).unwrap().as_ref() + } +} + +impl AsRef<[u8; PUBLIC_RAW_LEN]> for Keyring { + fn as_ref(&self) -> &[u8; PUBLIC_RAW_LEN] { + PUBLIC_KEYS.get(self).unwrap().as_ref() + } +} + +impl AsRef for Keyring { + fn as_ref(&self) -> &Public { + PUBLIC_KEYS.get(self).unwrap() + } +} + +impl Deref for Keyring { + type Target = [u8; PUBLIC_RAW_LEN]; + fn deref(&self) -> &[u8; PUBLIC_RAW_LEN] { + PUBLIC_KEYS.get(self).unwrap().as_ref() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::{bandersnatch::Pair, Pair as PairT}; + + #[test] + fn should_work() { + assert!(Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Bob!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Bob.public(), + )); + } +} diff --git a/primitives/keyring/src/lib.rs b/primitives/keyring/src/lib.rs index 7432aff12544a..1db18f7edbdc8 100644 --- a/primitives/keyring/src/lib.rs +++ b/primitives/keyring/src/lib.rs @@ -23,11 +23,17 @@ pub mod sr25519; /// Test account crypto for ed25519. pub mod ed25519; +/// Test account crypto for bandersnatch. 
+#[cfg(feature = "bandersnatch-experimental")] +pub mod bandersnatch; + /// Convenience export: Sr25519's Keyring is exposed as `AccountKeyring`, /// since it tends to be used for accounts (although it may also be used /// by authorities). pub use sr25519::Keyring as AccountKeyring; +#[cfg(feature = "bandersnatch-experimental")] +pub use bandersnatch::Keyring as BandersnatchKeyring; pub use ed25519::Keyring as Ed25519Keyring; pub use sr25519::Keyring as Sr25519Keyring; diff --git a/primitives/keystore/Cargo.toml b/primitives/keystore/Cargo.toml index a749b95a483b1..7e551b7cbf268 100644 --- a/primitives/keystore/Cargo.toml +++ b/primitives/keystore/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-keystore" -version = "0.13.0" +version = "0.27.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -13,13 +13,11 @@ documentation = "https://docs.rs/sp-core" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -futures = "0.3.21" +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } parking_lot = { version = "0.12.1", default-features = false } -serde = { version = "1.0", optional = true } thiserror = "1.0" -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-externalities = { version = "0.13.0", default-features = false, path = "../externalities" } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +sp-externalities = { version = "0.19.0", default-features = false, path = "../externalities" } [dev-dependencies] rand = "0.7.2" @@ -29,11 +27,16 @@ rand_chacha = "0.2.2" default = ["std"] std = [ "codec/std", - "serde", "sp-core/std", "sp-externalities/std", ] -# This feature adds BLS crypto primitives. It should not be used in production since -# the BLS implementation and interface may still be subject to significant change. 
+# This feature adds BLS crypto primitives. +# It should not be used in production since the implementation and interface may still +# be subject to significant changes. bls-experimental = ["sp-core/bls-experimental"] + +# This feature adds Bandersnatch crypto primitives. +# It should not be used in production since the implementation and interface may still +# be subject to significant changes. +bandersnatch-experimental = ["sp-core/bandersnatch-experimental"] diff --git a/primitives/keystore/src/lib.rs b/primitives/keystore/src/lib.rs index 1d2a27cb8726c..82062fe7b40a7 100644 --- a/primitives/keystore/src/lib.rs +++ b/primitives/keystore/src/lib.rs @@ -19,6 +19,8 @@ pub mod testing; +#[cfg(feature = "bandersnatch-experimental")] +use sp_core::bandersnatch; #[cfg(feature = "bls-experimental")] use sp_core::{bls377, bls381}; use sp_core::{ @@ -174,37 +176,121 @@ pub trait Keystore: Send + Sync { msg: &[u8; 32], ) -> Result, Error>; - #[cfg(feature = "bls-experimental")] + /// Returns all the bandersnatch public keys for the given key type. + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_public_keys(&self, key_type: KeyTypeId) -> Vec; + + /// Generate a new bandersnatch key pair for the given key type and an optional seed. + /// + /// Returns an `bandersnatch::Public` key of the generated key pair or an `Err` if + /// something failed during key generation. + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result; + + /// Generate an bandersnatch signature for a given message. + /// + /// Receives [`KeyTypeId`] and an [`bandersnatch::Public`] key to be able to map + /// them to a private key that exists in the keystore. + /// + /// Returns an [`bandersnatch::Signature`] or `None` in case the given `key_type` + /// and `public` combination doesn't exist in the keystore. + /// An `Err` will be returned if generating the signature itself failed. 
+ #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + msg: &[u8], + ) -> Result, Error>; + + /// Generate a bandersnatch VRF signature for the given data. + /// + /// Receives [`KeyTypeId`] and an [`bandersnatch::Public`] key to be able to map + /// them to a private key that exists in the keystore. + /// + /// Returns `None` if the given `key_type` and `public` combination doesn't + /// exist in the keystore or an `Err` when something failed. + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_vrf_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + input: &bandersnatch::vrf::VrfSignData, + ) -> Result, Error>; + + /// Generate a bandersnatch VRF (pre)output for a given input data. + /// + /// Receives [`KeyTypeId`] and an [`bandersnatch::Public`] key to be able to map + /// them to a private key that exists in the keystore. + /// + /// Returns `None` if the given `key_type` and `public` combination doesn't + /// exist in the keystore or an `Err` when something failed. + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_vrf_output( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + input: &bandersnatch::vrf::VrfInput, + ) -> Result, Error>; + + /// Generate a bandersnatch ring-VRF signature for the given data. + /// + /// Receives [`KeyTypeId`] and an [`bandersnatch::Public`] key to be able to map + /// them to a private key that exists in the keystore. + /// + /// Also takes a [`bandersnatch::ring_vrf::RingProver`] instance obtained from + /// a valid [`bandersnatch::ring_vrf::RingContext`]. + /// + /// The ring signature is verifiable if the public key corresponding to the + /// signing [`bandersnatch::Pair`] is part of the ring from which the + /// [`bandersnatch::ring_vrf::RingProver`] has been constructed. + /// If not, the produced signature is just useless. 
+ /// + /// Returns `None` if the given `key_type` and `public` combination doesn't + /// exist in the keystore or an `Err` when something failed. + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_ring_vrf_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + input: &bandersnatch::vrf::VrfSignData, + prover: &bandersnatch::ring_vrf::RingProver, + ) -> Result, Error>; + /// Returns all bls12-381 public keys for the given key type. + #[cfg(feature = "bls-experimental")] fn bls381_public_keys(&self, id: KeyTypeId) -> Vec; - #[cfg(feature = "bls-experimental")] /// Returns all bls12-377 public keys for the given key type. + #[cfg(feature = "bls-experimental")] fn bls377_public_keys(&self, id: KeyTypeId) -> Vec; - #[cfg(feature = "bls-experimental")] /// Generate a new bls381 key pair for the given key type and an optional seed. /// /// Returns an `bls381::Public` key of the generated key pair or an `Err` if /// something failed during key generation. + #[cfg(feature = "bls-experimental")] fn bls381_generate_new( &self, key_type: KeyTypeId, seed: Option<&str>, ) -> Result; - #[cfg(feature = "bls-experimental")] /// Generate a new bls377 key pair for the given key type and an optional seed. /// /// Returns an `bls377::Public` key of the generated key pair or an `Err` if /// something failed during key generation. + #[cfg(feature = "bls-experimental")] fn bls377_generate_new( &self, key_type: KeyTypeId, seed: Option<&str>, ) -> Result; - #[cfg(feature = "bls-experimental")] /// Generate a bls381 signature for a given message. /// /// Receives [`KeyTypeId`] and a [`bls381::Public`] key to be able to map @@ -213,6 +299,7 @@ pub trait Keystore: Send + Sync { /// Returns an [`bls381::Signature`] or `None` in case the given `key_type` /// and `public` combination doesn't exist in the keystore. /// An `Err` will be returned if generating the signature itself failed. 
+ #[cfg(feature = "bls-experimental")] fn bls381_sign( &self, key_type: KeyTypeId, @@ -220,7 +307,6 @@ pub trait Keystore: Send + Sync { msg: &[u8], ) -> Result, Error>; - #[cfg(feature = "bls-experimental")] /// Generate a bls377 signature for a given message. /// /// Receives [`KeyTypeId`] and a [`bls377::Public`] key to be able to map @@ -229,6 +315,7 @@ pub trait Keystore: Send + Sync { /// Returns an [`bls377::Signature`] or `None` in case the given `key_type` /// and `public` combination doesn't exist in the keystore. /// An `Err` will be returned if generating the signature itself failed. + #[cfg(feature = "bls-experimental")] fn bls377_sign( &self, key_type: KeyTypeId, @@ -258,6 +345,7 @@ pub trait Keystore: Send + Sync { /// - sr25519 /// - ed25519 /// - ecdsa + /// - bandersnatch /// - bls381 /// - bls377 /// @@ -291,6 +379,12 @@ pub trait Keystore: Send + Sync { self.ecdsa_sign(id, &public, msg)?.map(|s| s.encode()) }, + #[cfg(feature = "bandersnatch-experimental")] + bandersnatch::CRYPTO_ID => { + let public = bandersnatch::Public::from_slice(public) + .map_err(|_| Error::ValidationError("Invalid public key format".into()))?; + self.bandersnatch_sign(id, &public, msg)?.map(|s| s.encode()) + }, #[cfg(feature = "bls-experimental")] bls381::CRYPTO_ID => { let public = bls381::Public::from_slice(public) @@ -309,6 +403,213 @@ pub trait Keystore: Send + Sync { } } +impl Keystore for Arc { + fn sr25519_public_keys(&self, key_type: KeyTypeId) -> Vec { + (**self).sr25519_public_keys(key_type) + } + + fn sr25519_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result { + (**self).sr25519_generate_new(key_type, seed) + } + + fn sr25519_sign( + &self, + key_type: KeyTypeId, + public: &sr25519::Public, + msg: &[u8], + ) -> Result, Error> { + (**self).sr25519_sign(key_type, public, msg) + } + + fn sr25519_vrf_sign( + &self, + key_type: KeyTypeId, + public: &sr25519::Public, + data: &sr25519::vrf::VrfSignData, + ) -> Result, Error> { + 
(**self).sr25519_vrf_sign(key_type, public, data) + } + + fn sr25519_vrf_output( + &self, + key_type: KeyTypeId, + public: &sr25519::Public, + input: &sr25519::vrf::VrfInput, + ) -> Result, Error> { + (**self).sr25519_vrf_output(key_type, public, input) + } + + fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec { + (**self).ed25519_public_keys(key_type) + } + + fn ed25519_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result { + (**self).ed25519_generate_new(key_type, seed) + } + + fn ed25519_sign( + &self, + key_type: KeyTypeId, + public: &ed25519::Public, + msg: &[u8], + ) -> Result, Error> { + (**self).ed25519_sign(key_type, public, msg) + } + + fn ecdsa_public_keys(&self, key_type: KeyTypeId) -> Vec { + (**self).ecdsa_public_keys(key_type) + } + + fn ecdsa_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result { + (**self).ecdsa_generate_new(key_type, seed) + } + + fn ecdsa_sign( + &self, + key_type: KeyTypeId, + public: &ecdsa::Public, + msg: &[u8], + ) -> Result, Error> { + (**self).ecdsa_sign(key_type, public, msg) + } + + fn ecdsa_sign_prehashed( + &self, + key_type: KeyTypeId, + public: &ecdsa::Public, + msg: &[u8; 32], + ) -> Result, Error> { + (**self).ecdsa_sign_prehashed(key_type, public, msg) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_public_keys(&self, key_type: KeyTypeId) -> Vec { + (**self).bandersnatch_public_keys(key_type) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result { + (**self).bandersnatch_generate_new(key_type, seed) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + msg: &[u8], + ) -> Result, Error> { + (**self).bandersnatch_sign(key_type, public, msg) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_vrf_sign( + &self, + key_type: 
KeyTypeId, + public: &bandersnatch::Public, + input: &bandersnatch::vrf::VrfSignData, + ) -> Result, Error> { + (**self).bandersnatch_vrf_sign(key_type, public, input) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_vrf_output( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + input: &bandersnatch::vrf::VrfInput, + ) -> Result, Error> { + (**self).bandersnatch_vrf_output(key_type, public, input) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_ring_vrf_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + input: &bandersnatch::vrf::VrfSignData, + prover: &bandersnatch::ring_vrf::RingProver, + ) -> Result, Error> { + (**self).bandersnatch_ring_vrf_sign(key_type, public, input, prover) + } + + #[cfg(feature = "bls-experimental")] + fn bls381_public_keys(&self, id: KeyTypeId) -> Vec { + (**self).bls381_public_keys(id) + } + + #[cfg(feature = "bls-experimental")] + fn bls377_public_keys(&self, id: KeyTypeId) -> Vec { + (**self).bls377_public_keys(id) + } + + #[cfg(feature = "bls-experimental")] + fn bls381_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result { + (**self).bls381_generate_new(key_type, seed) + } + + #[cfg(feature = "bls-experimental")] + fn bls377_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result { + (**self).bls377_generate_new(key_type, seed) + } + + #[cfg(feature = "bls-experimental")] + fn bls381_sign( + &self, + key_type: KeyTypeId, + public: &bls381::Public, + msg: &[u8], + ) -> Result, Error> { + (**self).bls381_sign(key_type, public, msg) + } + + #[cfg(feature = "bls-experimental")] + fn bls377_sign( + &self, + key_type: KeyTypeId, + public: &bls377::Public, + msg: &[u8], + ) -> Result, Error> { + (**self).bls377_sign(key_type, public, msg) + } + + fn insert(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()> { + (**self).insert(key_type, suri, public) + } + + fn keys(&self, key_type: 
KeyTypeId) -> Result>, Error> { + (**self).keys(key_type) + } + + fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { + (**self).has_keys(public_keys) + } +} + /// A shared pointer to a keystore implementation. pub type KeystorePtr = Arc; @@ -319,6 +620,13 @@ sp_externalities::decl_extension! { impl KeystoreExt { /// Create a new instance of `KeystoreExt` + /// + /// This is more performant as we don't need to wrap keystore in another [`Arc`]. + pub fn from(keystore: KeystorePtr) -> Self { + Self(keystore) + } + + /// Create a new instance of `KeystoreExt` using the given `keystore`. pub fn new(keystore: T) -> Self { Self(Arc::new(keystore)) } diff --git a/primitives/keystore/src/testing.rs b/primitives/keystore/src/testing.rs index e18931a7af883..efa35fd24bf46 100644 --- a/primitives/keystore/src/testing.rs +++ b/primitives/keystore/src/testing.rs @@ -19,6 +19,8 @@ use crate::{Error, Keystore, KeystorePtr}; +#[cfg(feature = "bandersnatch-experimental")] +use sp_core::bandersnatch; #[cfg(feature = "bls-experimental")] use sp_core::{bls377, bls381}; use sp_core::{ @@ -214,6 +216,64 @@ impl Keystore for MemoryKeystore { Ok(sig) } + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_public_keys(&self, key_type: KeyTypeId) -> Vec { + self.public_keys::(key_type) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result { + self.generate_new::(key_type, seed) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + msg: &[u8], + ) -> Result, Error> { + self.sign::(key_type, public, msg) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_vrf_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + data: &bandersnatch::vrf::VrfSignData, + ) -> Result, Error> { + self.vrf_sign::(key_type, public, data) + } + + #[cfg(feature = 
"bandersnatch-experimental")] + fn bandersnatch_ring_vrf_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + data: &bandersnatch::vrf::VrfSignData, + prover: &bandersnatch::ring_vrf::RingProver, + ) -> Result, Error> { + let sig = self + .pair::(key_type, public) + .map(|pair| pair.ring_vrf_sign(data, prover)); + Ok(sig) + } + + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_vrf_output( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + input: &bandersnatch::vrf::VrfInput, + ) -> Result, Error> { + self.vrf_output::(key_type, public, input) + } + #[cfg(feature = "bls-experimental")] fn bls381_public_keys(&self, key_type: KeyTypeId) -> Vec { self.public_keys::(key_type) @@ -330,7 +390,7 @@ mod tests { } #[test] - fn vrf_sign() { + fn sr25519_vrf_sign() { let store = MemoryKeystore::new(); let secret_uri = "//Alice"; @@ -359,7 +419,7 @@ mod tests { } #[test] - fn vrf_output() { + fn sr25519_vrf_output() { let store = MemoryKeystore::new(); let secret_uri = "//Alice"; @@ -406,4 +466,69 @@ mod tests { let res = store.ecdsa_sign_prehashed(ECDSA, &pair.public(), &msg).unwrap(); assert!(res.is_some()); } + + #[test] + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_vrf_sign() { + use sp_core::testing::BANDERSNATCH; + + let store = MemoryKeystore::new(); + + let secret_uri = "//Alice"; + let key_pair = + bandersnatch::Pair::from_string(secret_uri, None).expect("Generates key pair"); + + let in1 = bandersnatch::vrf::VrfInput::new("in", "foo"); + let sign_data = + bandersnatch::vrf::VrfSignData::new_unchecked(b"Test", Some("m1"), Some(in1)); + + let result = store.bandersnatch_vrf_sign(BANDERSNATCH, &key_pair.public(), &sign_data); + assert!(result.unwrap().is_none()); + + store + .insert(BANDERSNATCH, secret_uri, key_pair.public().as_ref()) + .expect("Inserts unknown key"); + + let result = store.bandersnatch_vrf_sign(BANDERSNATCH, &key_pair.public(), &sign_data); + + assert!(result.unwrap().is_some()); + 
} + + #[test] + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_ring_vrf_sign() { + use sp_core::testing::BANDERSNATCH; + + let store = MemoryKeystore::new(); + + let ring_ctx = bandersnatch::ring_vrf::RingContext::new_testing(); + + let mut pks: Vec<_> = (0..16) + .map(|i| bandersnatch::Pair::from_seed(&[i as u8; 32]).public()) + .collect(); + + let prover_idx = 3; + let prover = ring_ctx.prover(&pks, prover_idx).unwrap(); + + let secret_uri = "//Alice"; + let pair = bandersnatch::Pair::from_string(secret_uri, None).expect("Generates key pair"); + pks[prover_idx] = pair.public(); + + let in1 = bandersnatch::vrf::VrfInput::new("in1", "foo"); + let sign_data = + bandersnatch::vrf::VrfSignData::new_unchecked(b"Test", &["m1", "m2"], [in1]); + + let result = + store.bandersnatch_ring_vrf_sign(BANDERSNATCH, &pair.public(), &sign_data, &prover); + assert!(result.unwrap().is_none()); + + store + .insert(BANDERSNATCH, secret_uri, pair.public().as_ref()) + .expect("Inserts unknown key"); + + let result = + store.bandersnatch_ring_vrf_sign(BANDERSNATCH, &pair.public(), &sign_data, &prover); + + assert!(result.unwrap().is_some()); + } } diff --git a/primitives/merkle-mountain-range/Cargo.toml b/primitives/merkle-mountain-range/Cargo.toml index 4d2c050899493..9ede77ba40be2 100644 --- a/primitives/merkle-mountain-range/Cargo.toml +++ b/primitives/merkle-mountain-range/Cargo.toml @@ -12,20 +12,20 @@ description = "Merkle Mountain Range primitives." 
targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } -serde = { version = "1.0.136", features = ["derive", "alloc"], default-features = false, optional = true } +serde = { version = "1.0.163", features = ["derive", "alloc"], default-features = false, optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-debug-derive = { version = "5.0.0", default-features = false, path = "../debug-derive" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +sp-debug-derive = { version = "8.0.0", default-features = false, path = "../debug-derive" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } thiserror = "1.0" [dev-dependencies] -array-bytes = "4.1" +array-bytes = "6.1" [features] default = ["std"] diff --git a/primitives/metadata-ir/Cargo.toml b/primitives/metadata-ir/Cargo.toml index 27fada9c6f34e..49fd23e208e41 100644 --- a/primitives/metadata-ir/Cargo.toml +++ b/primitives/metadata-ir/Cargo.toml @@ -13,10 +13,10 @@ documentation = "https://docs.rs/sp-metadata-ir" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -frame-metadata = { version = "15.1.0", 
default-features = false, features = ["v14", "v15-unstable"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } [features] default = ["std"] diff --git a/primitives/metadata-ir/src/lib.rs b/primitives/metadata-ir/src/lib.rs index 3ddc2911d4c93..edfa58f861894 100644 --- a/primitives/metadata-ir/src/lib.rs +++ b/primitives/metadata-ir/src/lib.rs @@ -25,7 +25,7 @@ pub use frame_metadata; mod types; -use frame_metadata::{RuntimeMetadataPrefixed, RuntimeMetadataV14}; +use frame_metadata::RuntimeMetadataPrefixed; pub use types::*; mod v14; @@ -35,25 +35,18 @@ mod v15; const V14: u32 = 14; /// Metadata V15. -/// -/// Not yet stable, thus we set it to `u32::MAX`. -const V15: u32 = u32::MAX; +const V15: u32 = 15; /// Transform the IR to the specified version. /// /// Use [`supported_versions`] to find supported versions. pub fn into_version(metadata: MetadataIR, version: u32) -> Option { + // Note: Unstable metadata version is `u32::MAX` until stabilized. match version { // Latest stable version. - V14 => { - let v14: frame_metadata::v14::RuntimeMetadataV14 = metadata.into(); - Some(v14.into()) - }, + V14 => Some(into_v14(metadata)), // Unstable metadata. - V15 => { - let v15: frame_metadata::v15::RuntimeMetadataV15 = metadata.into(); - Some(v15.into()) - }, + V15 => Some(into_latest(metadata)), _ => None, } } @@ -65,7 +58,13 @@ pub fn supported_versions() -> sp_std::vec::Vec { /// Transform the IR to the latest stable metadata version. 
pub fn into_latest(metadata: MetadataIR) -> RuntimeMetadataPrefixed { - let latest: RuntimeMetadataV14 = metadata.into(); + let latest: frame_metadata::v15::RuntimeMetadataV15 = metadata.into(); + latest.into() +} + +/// Transform the IR to metadata version 14. +pub fn into_v14(metadata: MetadataIR) -> RuntimeMetadataPrefixed { + let latest: frame_metadata::v14::RuntimeMetadataV14 = metadata.into(); latest.into() } @@ -81,10 +80,19 @@ mod test { extrinsic: ExtrinsicMetadataIR { ty: meta_type::<()>(), version: 0, + address_ty: meta_type::<()>(), + call_ty: meta_type::<()>(), + signature_ty: meta_type::<()>(), + extra_ty: meta_type::<()>(), signed_extensions: vec![], }, ty: meta_type::<()>(), apis: vec![], + outer_enums: OuterEnumsIR { + call_enum_ty: meta_type::<()>(), + event_enum_ty: meta_type::<()>(), + error_enum_ty: meta_type::<()>(), + }, } } diff --git a/primitives/metadata-ir/src/types.rs b/primitives/metadata-ir/src/types.rs index 93ee54891d89f..b107d20a8e2bf 100644 --- a/primitives/metadata-ir/src/types.rs +++ b/primitives/metadata-ir/src/types.rs @@ -39,6 +39,8 @@ pub struct MetadataIR { pub ty: T::Type, /// Metadata of the Runtime API. pub apis: Vec>, + /// The outer enums types as found in the runtime. + pub outer_enums: OuterEnumsIR, } /// Metadata of a runtime trait. @@ -153,9 +155,19 @@ impl IntoPortable for PalletMetadataIR { #[derive(Clone, PartialEq, Eq, Encode, Debug)] pub struct ExtrinsicMetadataIR { /// The type of the extrinsic. + /// + /// Note: Field used for metadata V14 only. pub ty: T::Type, /// Extrinsic version. pub version: u8, + /// The type of the address that signs the extrinsic. + pub address_ty: T::Type, + /// The type of the outermost Call enum. + pub call_ty: T::Type, + /// The type of the extrinsic's signature. + pub signature_ty: T::Type, + /// The type of the outermost Extra enum. + pub extra_ty: T::Type, /// The signed extensions in the order they appear in the extrinsic. 
pub signed_extensions: Vec>, } @@ -167,6 +179,10 @@ impl IntoPortable for ExtrinsicMetadataIR { ExtrinsicMetadataIR { ty: registry.register_type(&self.ty), version: self.version, + address_ty: registry.register_type(&self.address_ty), + call_ty: registry.register_type(&self.call_ty), + signature_ty: registry.register_type(&self.signature_ty), + extra_ty: registry.register_type(&self.extra_ty), signed_extensions: registry.map_into_portable(self.signed_extensions), } } @@ -398,3 +414,40 @@ impl From for PalletErrorMetadataIR { Self { ty } } } + +/// The type of the outer enums. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub struct OuterEnumsIR { + /// The type of the outer `RuntimeCall` enum. + pub call_enum_ty: T::Type, + /// The type of the outer `RuntimeEvent` enum. + pub event_enum_ty: T::Type, + /// The module error type of the + /// [`DispatchError::Module`](https://docs.rs/sp-runtime/24.0.0/sp_runtime/enum.DispatchError.html#variant.Module) variant. + /// + /// The `Module` variant will be 5 scale encoded bytes which are normally decoded into + /// an `{ index: u8, error: [u8; 4] }` struct. This type ID points to an enum type which + /// instead interprets the first `index` byte as a pallet variant, and the remaining `error` + /// bytes as the appropriate `pallet::Error` type. It is an equally valid way to decode the + /// error bytes, and can be more informative. + /// + /// # Note + /// + /// - This type cannot be used directly to decode `sp_runtime::DispatchError` from the chain. + /// It provides just the information needed to decode `sp_runtime::DispatchError::Module`. + /// - Decoding the 5 error bytes into this type will not always lead to all of the bytes being + /// consumed; many error types do not require all of the bytes to represent them fully. 
+ pub error_enum_ty: T::Type, +} + +impl IntoPortable for OuterEnumsIR { + type Output = OuterEnumsIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + OuterEnumsIR { + call_enum_ty: registry.register_type(&self.call_enum_ty), + event_enum_ty: registry.register_type(&self.event_enum_ty), + error_enum_ty: registry.register_type(&self.error_enum_ty), + } + } +} diff --git a/primitives/metadata-ir/src/v15.rs b/primitives/metadata-ir/src/v15.rs index 86441228d008e..a942eb73223b2 100644 --- a/primitives/metadata-ir/src/v15.rs +++ b/primitives/metadata-ir/src/v15.rs @@ -17,20 +17,17 @@ //! Convert the IR to V15 metadata. +use crate::OuterEnumsIR; + use super::types::{ - ExtrinsicMetadataIR, MetadataIR, PalletCallMetadataIR, PalletConstantMetadataIR, - PalletErrorMetadataIR, PalletEventMetadataIR, PalletMetadataIR, PalletStorageMetadataIR, - RuntimeApiMetadataIR, RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, - SignedExtensionMetadataIR, StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, - StorageHasherIR, + ExtrinsicMetadataIR, MetadataIR, PalletMetadataIR, RuntimeApiMetadataIR, + RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, SignedExtensionMetadataIR, }; use frame_metadata::v15::{ - ExtrinsicMetadata, PalletCallMetadata, PalletConstantMetadata, PalletErrorMetadata, - PalletEventMetadata, PalletMetadata, PalletStorageMetadata, RuntimeApiMetadata, + CustomMetadata, ExtrinsicMetadata, OuterEnums, PalletMetadata, RuntimeApiMetadata, RuntimeApiMethodMetadata, RuntimeApiMethodParamMetadata, RuntimeMetadataV15, - SignedExtensionMetadata, StorageEntryMetadata, StorageEntryModifier, StorageEntryType, - StorageHasher, + SignedExtensionMetadata, }; impl From for RuntimeMetadataV15 { @@ -40,6 +37,10 @@ impl From for RuntimeMetadataV15 { ir.extrinsic.into(), ir.ty, ir.apis.into_iter().map(Into::into).collect(), + ir.outer_enums.into(), + // Substrate does not collect yet the custom metadata fields. 
+ // This allows us to extend the V15 easily. + CustomMetadata { map: Default::default() }, ) } } @@ -86,87 +87,6 @@ impl From for PalletMetadata { } } -impl From for StorageEntryModifier { - fn from(ir: StorageEntryModifierIR) -> Self { - match ir { - StorageEntryModifierIR::Optional => StorageEntryModifier::Optional, - StorageEntryModifierIR::Default => StorageEntryModifier::Default, - } - } -} - -impl From for StorageHasher { - fn from(ir: StorageHasherIR) -> Self { - match ir { - StorageHasherIR::Blake2_128 => StorageHasher::Blake2_128, - StorageHasherIR::Blake2_256 => StorageHasher::Blake2_256, - StorageHasherIR::Blake2_128Concat => StorageHasher::Blake2_128Concat, - StorageHasherIR::Twox128 => StorageHasher::Twox128, - StorageHasherIR::Twox256 => StorageHasher::Twox256, - StorageHasherIR::Twox64Concat => StorageHasher::Twox64Concat, - StorageHasherIR::Identity => StorageHasher::Identity, - } - } -} - -impl From for StorageEntryType { - fn from(ir: StorageEntryTypeIR) -> Self { - match ir { - StorageEntryTypeIR::Plain(ty) => StorageEntryType::Plain(ty), - StorageEntryTypeIR::Map { hashers, key, value } => StorageEntryType::Map { - hashers: hashers.into_iter().map(Into::into).collect(), - key, - value, - }, - } - } -} - -impl From for StorageEntryMetadata { - fn from(ir: StorageEntryMetadataIR) -> Self { - StorageEntryMetadata { - name: ir.name, - modifier: ir.modifier.into(), - ty: ir.ty.into(), - default: ir.default, - docs: ir.docs, - } - } -} - -impl From for PalletStorageMetadata { - fn from(ir: PalletStorageMetadataIR) -> Self { - PalletStorageMetadata { - prefix: ir.prefix, - entries: ir.entries.into_iter().map(Into::into).collect(), - } - } -} - -impl From for PalletCallMetadata { - fn from(ir: PalletCallMetadataIR) -> Self { - PalletCallMetadata { ty: ir.ty } - } -} - -impl From for PalletEventMetadata { - fn from(ir: PalletEventMetadataIR) -> Self { - PalletEventMetadata { ty: ir.ty } - } -} - -impl From for PalletConstantMetadata { - fn from(ir: 
PalletConstantMetadataIR) -> Self { - PalletConstantMetadata { name: ir.name, ty: ir.ty, value: ir.value, docs: ir.docs } - } -} - -impl From for PalletErrorMetadata { - fn from(ir: PalletErrorMetadataIR) -> Self { - PalletErrorMetadata { ty: ir.ty } - } -} - impl From for SignedExtensionMetadata { fn from(ir: SignedExtensionMetadataIR) -> Self { SignedExtensionMetadata { @@ -180,9 +100,22 @@ impl From for SignedExtensionMetadata { impl From for ExtrinsicMetadata { fn from(ir: ExtrinsicMetadataIR) -> Self { ExtrinsicMetadata { - ty: ir.ty, version: ir.version, + address_ty: ir.address_ty, + call_ty: ir.call_ty, + signature_ty: ir.signature_ty, + extra_ty: ir.extra_ty, signed_extensions: ir.signed_extensions.into_iter().map(Into::into).collect(), } } } + +impl From for OuterEnums { + fn from(ir: OuterEnumsIR) -> Self { + OuterEnums { + call_enum_ty: ir.call_enum_ty, + event_enum_ty: ir.event_enum_ty, + error_enum_ty: ir.error_enum_ty, + } + } +} diff --git a/primitives/npos-elections/Cargo.toml b/primitives/npos-elections/Cargo.toml index 9006a4d199189..cd704082f5713 100644 --- a/primitives/npos-elections/Cargo.toml +++ b/primitives/npos-elections/Cargo.toml @@ -13,13 +13,13 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", default-features = false, features = ["derive", "alloc"], optional = true } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../arithmetic" } -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = 
"../std" } +serde = { version = "1.0.163", default-features = false, features = ["derive", "alloc"], optional = true } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../arithmetic" } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } [dev-dependencies] rand = "0.8.5" diff --git a/primitives/npos-elections/fuzzer/Cargo.toml b/primitives/npos-elections/fuzzer/Cargo.toml index 4caba796c1556..acb2b7d89a5c7 100644 --- a/primitives/npos-elections/fuzzer/Cargo.toml +++ b/primitives/npos-elections/fuzzer/Cargo.toml @@ -15,12 +15,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "4.2.5", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-npos-elections = { version = "4.0.0-dev", path = ".." } -sp-runtime = { version = "7.0.0", path = "../../runtime" } +sp-runtime = { version = "24.0.0", path = "../../runtime" } [[bin]] name = "reduce" diff --git a/primitives/npos-elections/src/balancing.rs b/primitives/npos-elections/src/balancing.rs index 234326ee94dd2..90dbe7eb71478 100644 --- a/primitives/npos-elections/src/balancing.rs +++ b/primitives/npos-elections/src/balancing.rs @@ -19,7 +19,7 @@ //! //! Given a committee `A` and an edge weight vector `w`, a balanced solution is one that //! -//! 1. it maximizes the sum of member supports, i.e `Argmax { sum(support(c)) }`. for all `c` in +//! 1. it maximizes the sum of member supports, i.e `Argmax { sum(support(c)) }`. for all `c` in //! `A`. //! 2. it minimizes the sum of supports squared, i.e `Argmin { sum(support(c).pow(2)) }` for all `c` //! 
in `A`. diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index 253a231602f77..0afe1ec5bb692 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -404,7 +404,7 @@ impl Voter { }) } - /// This voter's budget + /// This voter's budget. #[inline] pub fn budget(&self) -> ExtendedBalance { self.budget @@ -470,7 +470,7 @@ pub fn to_support_map( // build support struct. for StakedAssignment { who, distribution } in assignments.iter() { for (c, weight_extended) in distribution.iter() { - let mut support = supports.entry(c.clone()).or_default(); + let support = supports.entry(c.clone()).or_default(); support.total = support.total.saturating_add(*weight_extended); support.voters.push((who.clone(), *weight_extended)); } diff --git a/primitives/offchain/Cargo.toml b/primitives/offchain/Cargo.toml index cb567893776e0..6d73bf9212138 100644 --- a/primitives/offchain/Cargo.toml +++ b/primitives/offchain/Cargo.toml @@ -14,8 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../runtime" } [features] default = ["std"] diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index 9da052b4a05e1..e73cfa98ca418 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-panic-handler" -version = "5.0.0" +version = "8.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index ef9fdc544301d..37840c38e79f1 100644 --- 
a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -14,8 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] rustc-hash = "1.1.0" -serde = { version = "1.0.136", features = ["derive"] } -sp-core = { version = "7.0.0", path = "../core" } +serde = { version = "1.0.163", features = ["derive"] } +sp-core = { version = "21.0.0", path = "../core" } [dev-dependencies] serde_json = "1.0.85" diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index f2ddc84b1e2c2..e0d6d9d89bd31 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface" -version = "7.0.0" +version = "17.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -15,22 +15,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { version = "1.1.0", default-features = false } -sp-wasm-interface = { version = "7.0.0", path = "../wasm-interface", default-features = false } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } -sp-tracing = { version = "6.0.0", default-features = false, path = "../tracing" } -sp-runtime-interface-proc-macro = { version = "6.0.0", path = "proc-macro" } -sp-externalities = { version = "0.13.0", default-features = false, path = "../externalities" } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["bytes"] } +sp-wasm-interface = { version = "14.0.0", path = "../wasm-interface", default-features = false } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } +sp-tracing = { version = "10.0.0", default-features = false, path = "../tracing" } +sp-runtime-interface-proc-macro = { version = "11.0.0", path = "proc-macro" } +sp-externalities = { version = "0.19.0", default-features = false, path = "../externalities" } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, 
features = ["bytes"] } static_assertions = "1.0.0" primitive-types = { version = "0.12.0", default-features = false } -sp-storage = { version = "7.0.0", default-features = false, path = "../storage" } +sp-storage = { version = "13.0.0", default-features = false, path = "../storage" } impl-trait-for-tuples = "0.2.2" [dev-dependencies] sp-runtime-interface-test-wasm = { version = "2.0.0", path = "test-wasm" } -sp-state-machine = { version = "0.13.0", path = "../state-machine" } -sp-core = { version = "7.0.0", path = "../core" } -sp-io = { version = "7.0.0", path = "../io" } +sp-state-machine = { version = "0.28.0", path = "../state-machine" } +sp-core = { version = "21.0.0", path = "../core" } +sp-io = { version = "23.0.0", path = "../io" } rustversion = "1.0.6" trybuild = "1.0.74" @@ -45,6 +45,10 @@ std = [ "codec/std", "sp-externalities/std", "primitive-types/std", + "sp-core/std", + "sp-io/std", + "sp-runtime-interface-test-wasm/std", + "sp-state-machine/std" ] # ATTENTION diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml index 3a63c1ef55dab..4b50dfe2a7a13 100644 --- a/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/primitives/runtime-interface/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-proc-macro" -version = "6.0.0" +version = "11.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -19,5 +19,5 @@ proc-macro = true Inflector = "0.11.4" proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" -quote = "1.0.26" -syn = { version = "2.0.14", features = ["full", "visit", "fold", "extra-traits"] } +quote = "1.0.28" +syn = { version = "2.0.16", features = ["full", "visit", "fold", "extra-traits"] } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs index f3cdcf7fd54a9..77a9e56eecba5 100644 --- 
a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs @@ -96,6 +96,7 @@ fn generate_extern_host_function( method.sig.ident, ); let return_value = &method.sig.output; + let cfg_attrs = method.attrs.iter().filter(|a| a.path().is_ident("cfg")); let ffi_return_value = match method.sig.output { ReturnType::Default => quote!(), @@ -112,6 +113,7 @@ fn generate_extern_host_function( }; Ok(quote! { + #(#cfg_attrs)* #[doc = #doc_string] pub fn #function ( #( #args ),* ) #return_value { extern "C" { @@ -143,8 +145,10 @@ fn generate_exchangeable_host_function(method: &TraitItemFn) -> Result Vec<&'static dyn #crate_::sp_wasm_interface::Function> { - vec![ #( &#host_function_names as &dyn #crate_::sp_wasm_interface::Function ),* ] + let mut host_functions_list = Vec::new(); + #(#append_hf_bodies)* + host_functions_list } #crate_::sp_wasm_interface::if_wasmtime_is_enabled! { @@ -208,7 +215,7 @@ fn generate_host_function_implementation( method: &RuntimeInterfaceFunction, version: u32, is_wasm_only: bool, -) -> Result<(TokenStream, Ident, TokenStream)> { +) -> Result<(TokenStream, TokenStream, TokenStream)> { let name = create_host_function_ident(&method.sig.ident, version, trait_name).to_string(); let struct_name = Ident::new(&name.to_pascal_case(), Span::call_site()); let crate_ = generate_crate_access(); @@ -323,10 +330,21 @@ fn generate_host_function_implementation( }); } + let cfg_attrs: Vec<_> = + method.attrs.iter().filter(|a| a.path().is_ident("cfg")).cloned().collect(); + if version > 1 && !cfg_attrs.is_empty() { + return Err(Error::new( + method.span(), + "Conditional compilation is not supported for versioned functions", + )) + } + let implementation = quote! 
{ + #(#cfg_attrs)* #[cfg(feature = "std")] struct #struct_name; + #(#cfg_attrs)* #[cfg(feature = "std")] impl #struct_name { fn call( @@ -341,6 +359,7 @@ fn generate_host_function_implementation( } } + #(#cfg_attrs)* #[cfg(feature = "std")] impl #crate_::sp_wasm_interface::Function for #struct_name { fn name(&self) -> &str { @@ -368,6 +387,7 @@ fn generate_host_function_implementation( }; let register_body = quote! { + #(#cfg_attrs)* registry.register_static( #crate_::sp_wasm_interface::Function::name(&#struct_name), |mut caller: #crate_::sp_wasm_interface::wasmtime::Caller, #(#ffi_args_prototype),*| @@ -399,7 +419,12 @@ fn generate_host_function_implementation( )?; }; - Ok((implementation, struct_name, register_body)) + let append_hf_body = quote! { + #(#cfg_attrs)* + host_functions_list.push(&#struct_name as &dyn #crate_::sp_wasm_interface::Function); + }; + + Ok((implementation, register_body, append_hf_body)) } /// Generate the `wasm_interface::Signature` for the given host function `sig`. diff --git a/primitives/runtime-interface/src/lib.rs b/primitives/runtime-interface/src/lib.rs index 058801522a4f0..1f1638880bb6c 100644 --- a/primitives/runtime-interface/src/lib.rs +++ b/primitives/runtime-interface/src/lib.rs @@ -180,10 +180,19 @@ pub use sp_std; /// None => self.clear_storage(&[1, 2, 3, 4]), /// } /// } +/// +/// /// A function can be gated behind a configuration (`cfg`) attribute. +/// /// To prevent ambiguity and confusion about what will be the final exposed host +/// /// functions list, conditionally compiled functions can't be versioned. +/// /// That is, conditionally compiled functions with `version`s greater than 1 +/// /// are not allowed. 
+/// #[cfg(feature = "experimental-function")] +/// fn gated_call(data: &[u8]) -> Vec { +/// [42].to_vec() +/// } /// } /// ``` /// -/// /// The given example will generate roughly the following code for native: /// /// ``` @@ -197,6 +206,8 @@ pub use sp_std; /// fn call_version_2(data: &[u8]) -> Vec; /// fn call_version_3(data: &[u8]) -> Vec; /// fn set_or_clear_version_1(&mut self, optional: Option>); +/// #[cfg(feature = "experimental-function")] +/// fn gated_call_version_1(data: &[u8]) -> Vec; /// } /// /// impl Interface for &mut dyn sp_externalities::Externalities { @@ -209,6 +220,8 @@ pub use sp_std; /// None => self.clear_storage(&[1, 2, 3, 4]), /// } /// } +/// #[cfg(feature = "experimental-function")] +/// fn gated_call_version_1(data: &[u8]) -> Vec { [42].to_vec() } /// } /// /// pub fn call(data: &[u8]) -> Vec { @@ -237,6 +250,16 @@ pub use sp_std; /// .expect("`set_or_clear` called outside of an Externalities-provided environment.") /// } /// +/// #[cfg(feature = "experimental-function")] +/// pub fn gated_call(data: &[u8]) -> Vec { +/// gated_call_version_1(data) +/// } +/// +/// #[cfg(feature = "experimental-function")] +/// fn gated_call_version_1(data: &[u8]) -> Vec { +/// <&mut dyn sp_externalities::Externalities as Interface>::gated_call_version_1(data) +/// } +/// /// /// This type implements the `HostFunctions` trait (from `sp-wasm-interface`) and /// /// provides the host implementation for the wasm side. The host implementation converts the /// /// arguments from wasm to native and calls the corresponding native function. @@ -247,28 +270,43 @@ pub use sp_std; /// } /// ``` /// -/// /// The given example will generate roughly the following code for wasm: /// /// ``` /// mod interface { /// mod extern_host_functions_impls { -/// extern "C" { -/// /// Every function is exported as `ext_TRAIT_NAME_FUNCTION_NAME_version_VERSION`. -/// /// -/// /// `TRAIT_NAME` is converted into snake case. 
-/// /// -/// /// The type for each argument of the exported function depends on -/// /// `::FFIType`. -/// /// -/// /// `data` holds the pointer and the length to the `[u8]` slice. -/// pub fn ext_Interface_call_version_1(data: u64) -> u64; -/// /// `optional` holds the pointer and the length of the encoded value. -/// pub fn ext_Interface_set_or_clear_version_1(optional: u64); +/// /// Every function is exported by the native code as `ext_FUNCTION_NAME_version_VERSION`. +/// /// +/// /// The type for each argument of the exported function depends on +/// /// `::FFIType`. +/// /// +/// /// `key` holds the pointer and the length to the `data` slice. +/// pub fn call(data: &[u8]) -> Vec { +/// extern "C" { pub fn ext_call_version_2(key: u64); } +/// // Should call into external `ext_call_version_2(<[u8] as IntoFFIValue>::into_ffi_value(key))` +/// // But this is too much to replicate in a doc test so here we just return a dummy vector. +/// // Note that we jump into the latest version not marked as `register_only` (i.e. version 2). +/// Vec::new() +/// } +/// +/// /// `key` holds the pointer and the length of the `option` value. +/// pub fn set_or_clear(option: Option>) { +/// extern "C" { pub fn ext_set_or_clear_version_1(key: u64); } +/// // Same as above +/// } +/// +/// /// `key` holds the pointer and the length to the `data` slice. +/// #[cfg(feature = "experimental-function")] +/// pub fn gated_call(data: &[u8]) -> Vec { +/// extern "C" { pub fn ext_gated_call_version_1(key: u64); } +/// /// Same as above +/// Vec::new() /// } /// } /// -/// /// The type is actually `ExchangeableFunction` (from `sp-runtime-interface`). +/// /// The type is actually `ExchangeableFunction` (from `sp-runtime-interface`) and +/// /// by default this is initialized to jump into the corresponding function in +/// /// `extern_host_functions_impls`. /// /// /// /// This can be used to replace the implementation of the `call` function. 
/// /// Instead of calling into the host, the callee will automatically call the other @@ -279,6 +317,8 @@ pub use sp_std; /// /// `host_call.replace_implementation(some_other_impl)` /// pub static host_call: () = (); /// pub static host_set_or_clear: () = (); +/// #[cfg(feature = "experimental-feature")] +/// pub static gated_call: () = (); /// /// pub fn call(data: &[u8]) -> Vec { /// // This is the actual call: `host_call.get()(data)` @@ -291,6 +331,12 @@ pub use sp_std; /// pub fn set_or_clear(optional: Option>) { /// // Same as above /// } +/// +/// #[cfg(feature = "experimental-feature")] +/// pub fn gated_call(data: &[u8]) -> Vec { +/// // Same as above +/// Vec::new() +/// } /// } /// ``` /// diff --git a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml index ec07481234e91..2ff86086a890b 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml +++ b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml @@ -13,10 +13,9 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "7.0.0", default-features = false, path = "../../core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../io" } -sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../" } -sp-std = { version = "5.0.0", default-features = false, path = "../../std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../io" } +sp-runtime-interface = { version = "17.0.0", default-features = false, path = "../" } [build-dependencies] substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder", optional = true } @@ -27,6 +26,5 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime-interface/std", - "sp-std/std", "substrate-wasm-builder", ] diff --git 
a/primitives/runtime-interface/test-wasm-deprecated/build.rs b/primitives/runtime-interface/test-wasm-deprecated/build.rs index b7676a70dfe84..651f57388e0d0 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/build.rs +++ b/primitives/runtime-interface/test-wasm-deprecated/build.rs @@ -22,6 +22,7 @@ fn main() { .with_current_project() .export_heap_base() .import_memory() + .disable_runtime_version_section_check() .build(); } } diff --git a/primitives/runtime-interface/test-wasm/Cargo.toml b/primitives/runtime-interface/test-wasm/Cargo.toml index 1061a54d7f1b8..82fa35267cf51 100644 --- a/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/primitives/runtime-interface/test-wasm/Cargo.toml @@ -14,10 +14,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { version = "1.1.0", default-features = false } -sp-core = { version = "7.0.0", default-features = false, path = "../../core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../io" } -sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../" } -sp-std = { version = "5.0.0", default-features = false, path = "../../std" } +sp-core = { version = "21.0.0", default-features = false, path = "../../core" } +sp-io = { version = "23.0.0", default-features = false, path = "../../io" } +sp-runtime-interface = { version = "17.0.0", default-features = false, path = "../" } +sp-std = { version = "8.0.0", default-features = false, path = "../../std" } [build-dependencies] substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder", optional = true } diff --git a/primitives/runtime-interface/test-wasm/build.rs b/primitives/runtime-interface/test-wasm/build.rs index b7676a70dfe84..651f57388e0d0 100644 --- a/primitives/runtime-interface/test-wasm/build.rs +++ b/primitives/runtime-interface/test-wasm/build.rs @@ -22,6 +22,7 @@ fn main() { .with_current_project() .export_heap_base() .import_memory() + 
.disable_runtime_version_section_check() .build(); } } diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index 4e4522fd93dd2..e5b34c0d27ddf 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ b/primitives/runtime-interface/test/Cargo.toml @@ -16,9 +16,9 @@ tracing = "0.1.29" tracing-core = "0.1.28" sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } sc-executor-common = { version = "0.10.0-dev", path = "../../../client/executor/common" } -sp-io = { version = "7.0.0", path = "../../io" } -sp-runtime = { version = "7.0.0", path = "../../runtime" } -sp-runtime-interface = { version = "7.0.0", path = "../" } +sp-io = { version = "23.0.0", path = "../../io" } +sp-runtime = { version = "24.0.0", path = "../../runtime" } +sp-runtime-interface = { version = "17.0.0", path = "../" } sp-runtime-interface-test-wasm = { version = "2.0.0", path = "../test-wasm" } sp-runtime-interface-test-wasm-deprecated = { version = "2.0.0", path = "../test-wasm-deprecated" } -sp-state-machine = { version = "0.13.0", path = "../../state-machine" } +sp-state-machine = { version = "0.28.0", path = "../../state-machine" } diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index e1be3b5d99d9b..215704a112154 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -109,8 +109,8 @@ fn host_function_not_found() { .0 .unwrap_err(); - assert!(err.contains("Instantiation: Export ")); - assert!(err.contains(" not found")); + assert!(err.contains("test_return_data")); + assert!(err.contains(" Failed to create module")); } #[test] diff --git a/primitives/runtime-interface/tests/ui/no_feature_gated_method.rs b/primitives/runtime-interface/tests/ui/no_feature_gated_method.rs new file mode 100644 index 0000000000000..51e45f178f0c5 --- /dev/null +++ b/primitives/runtime-interface/tests/ui/no_feature_gated_method.rs 
@@ -0,0 +1,18 @@ +use sp_runtime_interface::runtime_interface; + +#[runtime_interface] +trait Test { + fn foo() {} + + #[cfg(feature = "bar-feature")] + fn bar() {} + + #[cfg(not(feature = "bar-feature"))] + fn qux() {} +} + +fn main() { + test::foo(); + test::bar(); + test::qux(); +} diff --git a/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr b/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr new file mode 100644 index 0000000000000..e8accd62fc68d --- /dev/null +++ b/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr @@ -0,0 +1,5 @@ +error[E0425]: cannot find function `bar` in module `test` + --> tests/ui/no_feature_gated_method.rs:16:8 + | +16 | test::bar(); + | ^^^ not found in `test` diff --git a/primitives/runtime-interface/tests/ui/no_versioned_conditional_build.rs b/primitives/runtime-interface/tests/ui/no_versioned_conditional_build.rs new file mode 100644 index 0000000000000..a4a8a5804bee3 --- /dev/null +++ b/primitives/runtime-interface/tests/ui/no_versioned_conditional_build.rs @@ -0,0 +1,12 @@ +use sp_runtime_interface::runtime_interface; + +#[runtime_interface] +trait Test { + fn foo() {} + + #[version(2)] + #[cfg(feature = "foo-feature")] + fn foo() {} +} + +fn main() {} diff --git a/primitives/runtime-interface/tests/ui/no_versioned_conditional_build.stderr b/primitives/runtime-interface/tests/ui/no_versioned_conditional_build.stderr new file mode 100644 index 0000000000000..6f50e14278de5 --- /dev/null +++ b/primitives/runtime-interface/tests/ui/no_versioned_conditional_build.stderr @@ -0,0 +1,5 @@ +error: Conditional compilation is not supported for versioned functions + --> tests/ui/no_versioned_conditional_build.rs:7:2 + | +7 | #[version(2)] + | ^ diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 0d67a89c38c03..246b2a2ed2eb0 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime" -version = 
"7.0.0" +version = "24.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } either = { version = "1.5", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } impl-trait-for-tuples = "0.2.2" @@ -22,21 +22,21 @@ log = { version = "0.4.17", default-features = false } paste = "1.0" rand = { version = "0.8.5", optional = true } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", default-features = false, features = ["derive", "alloc"], optional = true } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../application-crypto" } -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../arithmetic" } -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-io = { version = "7.0.0", default-features = false, path = "../io" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } -sp-weights = { version = "4.0.0", default-features = false, path = "../weights" } +serde = { version = "1.0.163", default-features = false, features = ["derive", "alloc"], optional = true } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../application-crypto" } +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../arithmetic" } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +sp-io = { version = "23.0.0", default-features = false, path = "../io" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } +sp-weights = { version 
= "20.0.0", default-features = false, path = "../weights" } [dev-dependencies] rand = "0.8.5" serde_json = "1.0.85" zstd = { version = "0.12.3", default-features = false } sp-api = { version = "4.0.0-dev", path = "../api" } -sp-state-machine = { version = "0.13.0", path = "../state-machine" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-state-machine = { version = "0.28.0", path = "../state-machine" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } [features] @@ -57,6 +57,9 @@ std = [ "sp-io/std", "sp-std/std", "sp-weights/std", + "sp-api/std", + "sp-state-machine/std", + "sp-tracing/std" ] # Serde support without relying on std features. diff --git a/primitives/runtime/src/generic/block.rs b/primitives/runtime/src/generic/block.rs index 6261e412eb8ad..05146e880cb16 100644 --- a/primitives/runtime/src/generic/block.rs +++ b/primitives/runtime/src/generic/block.rs @@ -25,7 +25,10 @@ use serde::{Deserialize, Serialize}; use crate::{ codec::{Codec, Decode, Encode}, - traits::{self, Block as BlockT, Header as HeaderT, MaybeSerialize, Member, NumberFor}, + traits::{ + self, Block as BlockT, Header as HeaderT, MaybeSerialize, MaybeSerializeDeserialize, + Member, NumberFor, + }, Justifications, }; use sp_core::RuntimeDebug; @@ -79,16 +82,23 @@ impl fmt::Display for BlockId { #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "serde", serde(deny_unknown_fields))] -pub struct Block { +pub struct Block { /// The block header. pub header: Header, /// The accompanying extrinsics. 
pub extrinsics: Vec, } -impl traits::Block for Block +impl traits::HeaderProvider for Block where Header: HeaderT, +{ + type HeaderT = Header; +} + +impl traits::Block for Block +where + Header: HeaderT + MaybeSerializeDeserialize, Extrinsic: Member + Codec + traits::Extrinsic, { type Extrinsic = Extrinsic; diff --git a/primitives/runtime/src/generic/header.rs b/primitives/runtime/src/generic/header.rs index 7c1faaefb419a..82ab9a61f96d8 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -22,10 +22,11 @@ use crate::{ generic::Digest, scale_info::TypeInfo, traits::{ - self, AtLeast32BitUnsigned, Hash as HashT, MaybeDisplay, MaybeSerialize, - MaybeSerializeDeserialize, Member, SimpleBitOps, + self, AtLeast32BitUnsigned, Hash as HashT, MaybeDisplay, MaybeFromStr, + MaybeSerializeDeserialize, Member, }, }; +use codec::{FullCodec, MaxEncodedLen}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use sp_core::U256; @@ -33,6 +34,7 @@ use sp_std::fmt::Debug; /// Abstraction over a block header for a substrate chain. 
#[derive(Encode, Decode, PartialEq, Eq, Clone, sp_core::RuntimeDebug, TypeInfo)] +#[scale_info(skip_type_params(Hash))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "serde", serde(deny_unknown_fields))] @@ -79,55 +81,58 @@ impl traits::Header for Header where Number: Member + MaybeSerializeDeserialize + + MaybeFromStr + Debug + + Default + sp_std::hash::Hash + MaybeDisplay + AtLeast32BitUnsigned - + Codec + + FullCodec + Copy + + MaxEncodedLen + Into + TryFrom - + sp_std::str::FromStr, + + TypeInfo, Hash: HashT, - Hash::Output: Default - + sp_std::hash::Hash - + Copy - + Member - + Ord - + MaybeSerialize - + Debug - + MaybeDisplay - + SimpleBitOps - + Codec, { type Number = Number; type Hash = ::Output; type Hashing = Hash; + fn new( + number: Self::Number, + extrinsics_root: Self::Hash, + state_root: Self::Hash, + parent_hash: Self::Hash, + digest: Digest, + ) -> Self { + Self { number, extrinsics_root, state_root, parent_hash, digest } + } fn number(&self) -> &Self::Number { &self.number } + fn set_number(&mut self, num: Self::Number) { self.number = num } - fn extrinsics_root(&self) -> &Self::Hash { &self.extrinsics_root } + fn set_extrinsics_root(&mut self, root: Self::Hash) { self.extrinsics_root = root } - fn state_root(&self) -> &Self::Hash { &self.state_root } + fn set_state_root(&mut self, root: Self::Hash) { self.state_root = root } - fn parent_hash(&self) -> &Self::Hash { &self.parent_hash } + fn set_parent_hash(&mut self, hash: Self::Hash) { self.parent_hash = hash } @@ -141,16 +146,6 @@ where log::debug!(target: "header", "Retrieving mutable reference to digest"); &mut self.digest } - - fn new( - number: Self::Number, - extrinsics_root: Self::Hash, - state_root: Self::Hash, - parent_hash: Self::Hash, - digest: Digest, - ) -> Self { - Self { number, extrinsics_root, state_root, parent_hash, digest } - } } impl Header @@ -164,8 +159,6 @@ where + Into + 
TryFrom, Hash: HashT, - Hash::Output: - Default + sp_std::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Codec, { /// Convenience helper for computing the hash of the header without having /// to import the trait. diff --git a/primitives/runtime/src/generic/mod.rs b/primitives/runtime/src/generic/mod.rs index d9eee7fee8b20..3687f7cdb3b2b 100644 --- a/primitives/runtime/src/generic/mod.rs +++ b/primitives/runtime/src/generic/mod.rs @@ -15,9 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// tag::description[] -//! Generic implementations of Extrinsic/Header/Block. -// end::description[] +//! Generic implementations of [`crate::traits::Header`], [`crate::traits::Block`] and +//! [`crate::traits::Extrinsic`]. mod block; mod checked_extrinsic; diff --git a/primitives/runtime/src/generic/unchecked_extrinsic.rs b/primitives/runtime/src/generic/unchecked_extrinsic.rs index b9d7b9eb1b6c8..0b1cd2b54290e 100644 --- a/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -21,7 +21,7 @@ use crate::{ generic::CheckedExtrinsic, traits::{ self, Checkable, Extrinsic, ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, - SignedExtension, + SignaturePayload, SignedExtension, }, transaction_validity::{InvalidTransaction, TransactionValidityError}, OpaqueExtrinsic, @@ -40,6 +40,9 @@ use sp_std::{fmt, prelude::*}; /// the decoding fails. const EXTRINSIC_FORMAT_VERSION: u8 = 4; +/// The `SingaturePayload` of `UncheckedExtrinsic`. +type UncheckedSignaturePayload = (Address, Signature, Extra); + /// A extrinsic right from the external world. This is unchecked and so /// can contain a signature. #[derive(PartialEq, Eq, Clone)] @@ -50,11 +53,19 @@ where /// The signature, address, number of extrinsics have come before from /// the same signer and an era describing the longevity of this transaction, /// if this is a signed extrinsic. 
- pub signature: Option<(Address, Signature, Extra)>, + pub signature: Option>, /// The function that should be called. pub function: Call, } +impl SignaturePayload + for UncheckedSignaturePayload +{ + type SignatureAddress = Address; + type Signature = Signature; + type SignatureExtra = Extra; +} + /// Manual [`TypeInfo`] implementation because of custom encoding. The data is a valid encoded /// `Vec`, but requires some logic to extract the signature and payload. /// @@ -103,12 +114,12 @@ impl } } -impl Extrinsic - for UncheckedExtrinsic +impl + Extrinsic for UncheckedExtrinsic { type Call = Call; - type SignaturePayload = (Address, Signature, Extra); + type SignaturePayload = UncheckedSignaturePayload; fn is_signed(&self) -> Option { Some(self.signature.is_some()) diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 7be5bebf5de80..dd861ad05de9b 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -15,7 +15,32 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Runtime Modules shared primitive types. +//! # Substrate Runtime Primitives. +//! +//! This crate, among other things, contains a large library of types and utilities that are used in +//! the Substrate runtime, but are not particularly `FRAME`-oriented. +//! +//! ## Block, Header and Extrinsics +//! +//! Most notable, this crate contains some of the types and trait that enable important +//! communication between the client and the runtime. This includes: +//! +//! - A set of traits to declare what any block/header/extrinsic type should provide. +//! - [`traits::Block`], [`traits::Header`], [`traits::Extrinsic`] +//! - A set of types that implement these traits, whilst still providing a high degree of +//! configurability via generics. +//! - [`generic::Block`], [`generic::Header`], [`generic::UncheckedExtrinsic`] and +//! [`generic::CheckedExtrinsic`] +//! +//! ## Runtime API Types +//! 
+//! This crate also contains some types that are often used in conjuncture with Runtime APIs. Most +//! notable: +//! +//! - [`ApplyExtrinsicResult`], and [`DispatchOutcome`], which dictate how the client and runtime +//! communicate about the success or failure of an extrinsic. +//! - [`transaction_validity`], which dictates how the client and runtime communicate about the +//! validity of an extrinsic while still in the transaction-queue. #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] @@ -43,7 +68,7 @@ pub use sp_core::storage::StateVersion; pub use sp_core::storage::{Storage, StorageChild}; use sp_core::{ - crypto::{self, ByteArray}, + crypto::{self, ByteArray, FromEntropy}, ecdsa, ed25519, hash::{H256, H512}, sr25519, @@ -96,7 +121,7 @@ pub use sp_arithmetic::helpers_128bit; /// Re-export top-level arithmetic stuff. pub use sp_arithmetic::{ traits::SaturatedConversion, ArithmeticError, FixedI128, FixedI64, FixedPointNumber, - FixedPointOperand, FixedU128, InnerOf, PerThing, PerU16, Perbill, Percent, Permill, + FixedPointOperand, FixedU128, FixedU64, InnerOf, PerThing, PerU16, Perbill, Percent, Permill, Perquintill, Rational128, Rounding, UpperOf, }; @@ -199,6 +224,9 @@ pub trait BuildStorage { /// Something that can build the genesis storage of a module. #[cfg(feature = "std")] +#[deprecated( + note = "`BuildModuleGenesisStorage` is planned to be removed in December 2023. Use `BuildStorage` instead of it." +)] pub trait BuildModuleGenesisStorage: Sized { /// Create the module genesis storage into the given `storage` and `child_storage`. fn build_module_genesis_storage( @@ -311,6 +339,16 @@ pub enum MultiSigner { Ecdsa(ecdsa::Public), } +impl FromEntropy for MultiSigner { + fn from_entropy(input: &mut impl codec::Input) -> Result { + Ok(match input.read_byte()? % 3 { + 0 => Self::Ed25519(FromEntropy::from_entropy(input)?), + 1 => Self::Sr25519(FromEntropy::from_entropy(input)?), + 2.. 
=> Self::Ecdsa(FromEntropy::from_entropy(input)?), + }) + } +} + /// NOTE: This implementations is required by `SimpleAddressDeterminer`, /// we convert the hash into some AccountId, it's fine to use any scheme. impl> crypto::UncheckedFrom for MultiSigner { @@ -784,6 +822,9 @@ pub type ApplyExtrinsicResult = pub type ApplyExtrinsicResultWithInfo = Result, transaction_validity::TransactionValidityError>; +/// The error type used as return type in try runtime hooks. +pub type TryRuntimeError = DispatchError; + /// Verify a signature on an encoded value in a lazy manner. This can be /// an optimization if the signature scheme has an "unsigned" escape hash. pub fn verify_encoded_lazy( diff --git a/primitives/runtime/src/offchain/http.rs b/primitives/runtime/src/offchain/http.rs index 25e5c1007d5da..bacc0073825bb 100644 --- a/primitives/runtime/src/offchain/http.rs +++ b/primitives/runtime/src/offchain/http.rs @@ -343,10 +343,10 @@ impl Response { /// A buffered byte iterator over response body. /// /// Note that reading the body may return `None` in following cases: -/// 1. Either the deadline you've set is reached (check via `#error`; -/// In such case you can resume the reader by setting a new deadline) -/// 2. Or because of IOError. In such case the reader is not resumable and will keep -/// returning `None`. +/// 1. Either the deadline you've set is reached (check via `#error`; In such case you can resume +/// the reader by setting a new deadline) +/// 2. Or because of IOError. In such case the reader is not resumable and will keep returning +/// `None`. /// 3. The body has been returned. The reader will keep returning `None`. 
#[derive(Clone)] pub struct ResponseBody { diff --git a/primitives/runtime/src/runtime_logger.rs b/primitives/runtime/src/runtime_logger.rs index 63e96a52a527f..b7374b8b6f6c8 100644 --- a/primitives/runtime/src/runtime_logger.rs +++ b/primitives/runtime/src/runtime_logger.rs @@ -68,8 +68,7 @@ mod tests { use sp_api::ProvideRuntimeApi; use std::{env, str::FromStr}; use substrate_test_runtime_client::{ - runtime::TestAPI, DefaultTestClientBuilderExt, ExecutionStrategy, TestClientBuilder, - TestClientBuilderExt, + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; #[test] @@ -78,9 +77,7 @@ mod tests { sp_tracing::try_init_simple(); log::set_max_level(log::LevelFilter::from_str(&env::var("RUST_LOG").unwrap()).unwrap()); - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::AlwaysWasm) - .build(); + let client = TestClientBuilder::new().build(); let runtime_api = client.runtime_api(); runtime_api .do_trace_log(client.chain_info().genesis_hash) diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index 6d02e23094f90..5f94c834a8f29 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -23,7 +23,7 @@ use crate::{ scale_info::TypeInfo, traits::{ self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, OpaqueKeys, - PostDispatchInfoOf, SignedExtension, ValidateUnsigned, + PostDispatchInfoOf, SignaturePayload, SignedExtension, ValidateUnsigned, }, transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, ApplyExtrinsicResultWithInfo, KeyTypeId, @@ -235,7 +235,7 @@ impl Deref for ExtrinsicWrapper { } /// Testing block -#[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode)] +#[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode, TypeInfo)] pub struct Block { /// Block header pub header: Header, @@ -243,6 +243,10 @@ pub struct Block { pub extrinsics: Vec, } +impl 
traits::HeaderProvider for Block { + type HeaderT = Header; +} + impl< Xt: 'static + Codec + Sized + Send + Sync + Serialize + Clone + Eq + Debug + traits::Extrinsic, > traits::Block for Block @@ -279,6 +283,15 @@ where } } +/// The signature payload of a `TestXt`. +type TxSingaturePayload = (u64, Extra); + +impl SignaturePayload for TxSingaturePayload { + type SignatureAddress = u64; + type Signature = (); + type SignatureExtra = Extra; +} + /// Test transaction, tuple of (sender, call, signed_extra) /// with index only used if sender is some. /// @@ -286,7 +299,7 @@ where #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] pub struct TestXt { /// Signature of the extrinsic. - pub signature: Option<(u64, Extra)>, + pub signature: Option>, /// Call of the extrinsic. pub call: Call, } @@ -331,9 +344,11 @@ impl Checkable for TestXt traits::Extrinsic for TestXt { +impl traits::Extrinsic + for TestXt +{ type Call = Call; - type SignaturePayload = (u64, Extra); + type SignaturePayload = TxSingaturePayload; fn is_signed(&self) -> Option { Some(self.signature.is_some()) diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index d37db4802d78f..17dc7ce50ea8b 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -18,7 +18,6 @@ //! Primitives for the runtime modules. use crate::{ - codec::{Codec, Decode, Encode, MaxEncodedLen}, generic::Digest, scale_info::{MetaType, StaticTypeInfo, TypeInfo}, transaction_validity::{ @@ -27,6 +26,7 @@ use crate::{ }, DispatchResult, }; +use codec::{Codec, Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use impl_trait_for_tuples::impl_for_tuples; #[cfg(feature = "serde")] use serde::{de::DeserializeOwned, Deserialize, Serialize}; @@ -339,6 +339,33 @@ impl> TryMorph for TryMorphInto { } } +/// Implementation of `Morph` to retrieve just the first element of a tuple. 
+pub struct TakeFirst; +impl Morph<(T1,)> for TakeFirst { + type Outcome = T1; + fn morph(a: (T1,)) -> T1 { + a.0 + } +} +impl Morph<(T1, T2)> for TakeFirst { + type Outcome = T1; + fn morph(a: (T1, T2)) -> T1 { + a.0 + } +} +impl Morph<(T1, T2, T3)> for TakeFirst { + type Outcome = T1; + fn morph(a: (T1, T2, T3)) -> T1 { + a.0 + } +} +impl Morph<(T1, T2, T3, T4)> for TakeFirst { + type Outcome = T1; + fn morph(a: (T1, T2, T3, T4)) -> T1 { + a.0 + } +} + /// Create a `Morph` and/or `TryMorph` impls with a simple closure-like expression. /// /// # Examples @@ -512,13 +539,25 @@ macro_rules! morph_types { morph_types! { /// Morpher to disregard the source value and replace with another. pub type Replace = |_| -> V::Type { V::get() }; + /// Mutator which reduces a scalar by a particular amount. pub type ReduceBy = |r: N::Type| -> N::Type { r.checked_sub(&N::get()).unwrap_or(Zero::zero()) } where N::Type: CheckedSub | Zero; + + /// A `TryMorph` implementation to reduce a scalar by a particular amount, checking for + /// underflow. + pub type CheckedReduceBy: TryMorph = |r: N::Type| -> Result { + r.checked_sub(&N::get()).ok_or(()) + } where N::Type: CheckedSub; + + /// A `TryMorph` implementation to enforce an upper limit for a result of the outer morphed type. + pub type MorphWithUpperLimit: TryMorph = |r: L::Type| -> Result { + M::try_morph(r).map(|m| m.min(L::get())) + } where L::Type: Ord, M: TryMorph; } -/// Extensible conversion trait. Generic over both source and destination types. +/// Infallible conversion trait. Generic over both source and destination types. pub trait Convert { /// Make conversion. fn convert(a: A) -> B; @@ -530,6 +569,161 @@ impl Convert for () { } } +/// Reversing infallible conversion trait. Generic over both source and destination types. +/// +/// This specifically reverses the conversion. +pub trait ConvertBack: Convert { + /// Make conversion back. + fn convert_back(b: B) -> A; +} + +/// Fallible conversion trait returning an [Option]. 
Generic over both source and destination types. +pub trait MaybeConvert { + /// Attempt to make conversion. + fn maybe_convert(a: A) -> Option; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl MaybeConvert for Tuple { + fn maybe_convert(a: A) -> Option { + for_tuples!( #( + match Tuple::maybe_convert(a.clone()) { + Some(b) => return Some(b), + None => {}, + } + )* ); + None + } +} + +/// Reversing fallible conversion trait returning an [Option]. Generic over both source and +/// destination types. +pub trait MaybeConvertBack: MaybeConvert { + /// Attempt to make conversion back. + fn maybe_convert_back(b: B) -> Option; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl MaybeConvertBack for Tuple { + fn maybe_convert_back(b: B) -> Option { + for_tuples!( #( + match Tuple::maybe_convert_back(b.clone()) { + Some(a) => return Some(a), + None => {}, + } + )* ); + None + } +} + +/// Fallible conversion trait which returns the argument in the case of being unable to convert. +/// Generic over both source and destination types. +pub trait TryConvert { + /// Attempt to make conversion. If returning [Result::Err], the inner must always be `a`. + fn try_convert(a: A) -> Result; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl TryConvert for Tuple { + fn try_convert(a: A) -> Result { + for_tuples!( #( + let a = match Tuple::try_convert(a) { + Ok(b) => return Ok(b), + Err(a) => a, + }; + )* ); + Err(a) + } +} + +/// Reversing fallible conversion trait which returns the argument in the case of being unable to +/// convert back. Generic over both source and destination types. +pub trait TryConvertBack: TryConvert { + /// Attempt to make conversion back. If returning [Result::Err], the inner must always be `b`. 
+ + fn try_convert_back(b: B) -> Result; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl TryConvertBack for Tuple { + fn try_convert_back(b: B) -> Result { + for_tuples!( #( + let b = match Tuple::try_convert_back(b) { + Ok(a) => return Ok(a), + Err(b) => b, + }; + )* ); + Err(b) + } +} + +/// Definition for a bi-directional, fallible conversion between two types. +pub trait MaybeEquivalence { + /// Attempt to convert reference of `A` into value of `B`, returning `None` if not possible. + fn convert(a: &A) -> Option; + /// Attempt to convert reference of `B` into value of `A`, returning `None` if not possible. + fn convert_back(b: &B) -> Option; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl MaybeEquivalence for Tuple { + fn convert(a: &A) -> Option { + for_tuples!( #( + match Tuple::convert(a) { + Some(b) => return Some(b), + None => {}, + } + )* ); + None + } + fn convert_back(b: &B) -> Option { + for_tuples!( #( + match Tuple::convert_back(b) { + Some(a) => return Some(a), + None => {}, + } + )* ); + None + } +} + +/// Adapter which turns a [Get] implementation into a [Convert] implementation which always returns +/// in the same value no matter the input. +pub struct ConvertToValue(sp_std::marker::PhantomData); +impl> Convert for ConvertToValue { + fn convert(_: X) -> Y { + T::get() + } +} +impl> MaybeConvert for ConvertToValue { + fn maybe_convert(_: X) -> Option { + Some(T::get()) + } +} +impl> MaybeConvertBack for ConvertToValue { + fn maybe_convert_back(_: Y) -> Option { + None + } +} +impl> TryConvert for ConvertToValue { + fn try_convert(_: X) -> Result { + Ok(T::get()) + } +} +impl> TryConvertBack for ConvertToValue { + fn try_convert_back(y: Y) -> Result { + Err(y) + } +} +impl> MaybeEquivalence for ConvertToValue { + fn convert(_: &X) -> Option { + Some(T::get()) + } + fn convert_back(_: &Y) -> Option { + None + } +} + /// A structure that performs identity conversion. 
pub struct Identity; impl Convert for Identity { @@ -542,19 +736,100 @@ impl ConvertBack for Identity { a } } +impl MaybeConvert for Identity { + fn maybe_convert(a: T) -> Option { + Some(a) + } +} +impl MaybeConvertBack for Identity { + fn maybe_convert_back(a: T) -> Option { + Some(a) + } +} +impl TryConvert for Identity { + fn try_convert(a: T) -> Result { + Ok(a) + } +} +impl TryConvertBack for Identity { + fn try_convert_back(a: T) -> Result { + Ok(a) + } +} +impl MaybeEquivalence for Identity { + fn convert(a: &T) -> Option { + Some(a.clone()) + } + fn convert_back(a: &T) -> Option { + Some(a.clone()) + } +} /// A structure that performs standard conversion using the standard Rust conversion traits. pub struct ConvertInto; -impl> Convert for ConvertInto { +impl, B> Convert for ConvertInto { fn convert(a: A) -> B { a.into() } } +impl, B> MaybeConvert for ConvertInto { + fn maybe_convert(a: A) -> Option { + Some(a.into()) + } +} +impl, B: Into> MaybeConvertBack for ConvertInto { + fn maybe_convert_back(b: B) -> Option { + Some(b.into()) + } +} +impl, B> TryConvert for ConvertInto { + fn try_convert(a: A) -> Result { + Ok(a.into()) + } +} +impl, B: Into> TryConvertBack for ConvertInto { + fn try_convert_back(b: B) -> Result { + Ok(b.into()) + } +} +impl, B: Clone + Into> MaybeEquivalence for ConvertInto { + fn convert(a: &A) -> Option { + Some(a.clone().into()) + } + fn convert_back(b: &B) -> Option { + Some(b.clone().into()) + } +} -/// Extensible conversion trait. Generic over both source and destination types. -pub trait ConvertBack: Convert { - /// Make conversion back. - fn convert_back(b: B) -> A; +/// A structure that performs standard conversion using the standard Rust conversion traits. 
+pub struct TryConvertInto; +impl, B> MaybeConvert for TryConvertInto { + fn maybe_convert(a: A) -> Option { + a.clone().try_into().ok() + } +} +impl, B: Clone + TryInto> MaybeConvertBack for TryConvertInto { + fn maybe_convert_back(b: B) -> Option { + b.clone().try_into().ok() + } +} +impl, B> TryConvert for TryConvertInto { + fn try_convert(a: A) -> Result { + a.clone().try_into().map_err(|_| a) + } +} +impl, B: Clone + TryInto> TryConvertBack for TryConvertInto { + fn try_convert_back(b: B) -> Result { + b.clone().try_into().map_err(|_| b) + } +} +impl, B: Clone + TryInto> MaybeEquivalence for TryConvertInto { + fn convert(a: &A) -> Option { + a.clone().try_into().ok() + } + fn convert_back(b: &B) -> Option { + b.clone().try_into().ok() + } } /// Convenience type to work around the highly unergonomic syntax needed @@ -684,18 +959,7 @@ pub trait Hash: + Hasher::Output> { /// The hash type produced. - type Output: Member - + MaybeSerializeDeserialize - + Debug - + sp_std::hash::Hash - + AsRef<[u8]> - + AsMut<[u8]> - + Copy - + Default - + Encode - + Decode - + MaxEncodedLen - + TypeInfo; + type Output: HashOutput; /// Produce the hash of some byte-slice. fn hash(s: &[u8]) -> Self::Output { @@ -714,6 +978,47 @@ pub trait Hash: fn trie_root(input: Vec<(Vec, Vec)>, state_version: StateVersion) -> Self::Output; } +/// Super trait with all the attributes for a hashing output. +pub trait HashOutput: + Member + + MaybeSerializeDeserialize + + MaybeDisplay + + MaybeFromStr + + Debug + + sp_std::hash::Hash + + AsRef<[u8]> + + AsMut<[u8]> + + Copy + + Ord + + Default + + Encode + + Decode + + EncodeLike + + MaxEncodedLen + + TypeInfo +{ +} + +impl HashOutput for T where + T: Member + + MaybeSerializeDeserialize + + MaybeDisplay + + MaybeFromStr + + Debug + + sp_std::hash::Hash + + AsRef<[u8]> + + AsMut<[u8]> + + Copy + + Ord + + Default + + Encode + + Decode + + EncodeLike + + MaxEncodedLen + + TypeInfo +{ +} + /// Blake2-256 Hash implementation. 
#[derive(PartialEq, Eq, Clone, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] @@ -732,13 +1037,13 @@ impl Hasher for BlakeTwo256 { impl Hash for BlakeTwo256 { type Output = sp_core::H256; - fn trie_root(input: Vec<(Vec, Vec)>, version: StateVersion) -> Self::Output { - sp_io::trie::blake2_256_root(input, version) - } - fn ordered_trie_root(input: Vec>, version: StateVersion) -> Self::Output { sp_io::trie::blake2_256_ordered_root(input, version) } + + fn trie_root(input: Vec<(Vec, Vec)>, version: StateVersion) -> Self::Output { + sp_io::trie::blake2_256_root(input, version) + } } /// Keccak-256 Hash implementation. @@ -759,13 +1064,13 @@ impl Hasher for Keccak256 { impl Hash for Keccak256 { type Output = sp_core::H256; - fn trie_root(input: Vec<(Vec, Vec)>, version: StateVersion) -> Self::Output { - sp_io::trie::keccak_256_root(input, version) - } - fn ordered_trie_root(input: Vec>, version: StateVersion) -> Self::Output { sp_io::trie::keccak_256_ordered_root(input, version) } + + fn trie_root(input: Vec<(Vec, Vec)>, version: StateVersion) -> Self::Output { + sp_io::trie::keccak_256_root(input, version) + } } /// Something that can be checked for equality and printed out to a debug channel if bad. @@ -849,31 +1154,24 @@ pub trait IsMember { /// `parent_hash`, as well as a `digest` and a block `number`. /// /// You can also create a `new` one from those fields. -pub trait Header: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + 'static { +pub trait Header: + Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + TypeInfo + 'static +{ /// Header number. 
type Number: Member + MaybeSerializeDeserialize + + MaybeFromStr + Debug + sp_std::hash::Hash + Copy + MaybeDisplay + AtLeast32BitUnsigned - + Codec - + sp_std::str::FromStr; - /// Header hash type - type Hash: Member - + MaybeSerializeDeserialize - + Debug - + sp_std::hash::Hash - + Ord - + Copy - + MaybeDisplay + Default - + SimpleBitOps - + Codec - + AsRef<[u8]> - + AsMut<[u8]> - + TypeInfo; + + TypeInfo + + MaxEncodedLen + + FullCodec; + /// Header hash type + type Hash: HashOutput; /// Hashing algorithm type Hashing: Hash; @@ -917,29 +1215,52 @@ pub trait Header: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + 's } } +// Something that provides the Header Type. Only for internal usage and should only be used +// via `HeaderFor` or `BlockNumberFor`. +// +// This is needed to fix the "cyclical" issue in loading Header/BlockNumber as part of a +// `pallet::call`. Essentially, `construct_runtime` aggregates all calls to create a `RuntimeCall` +// that is then used to define `UncheckedExtrinsic`. +// ```ignore +// pub type UncheckedExtrinsic = +// generic::UncheckedExtrinsic; +// ``` +// This `UncheckedExtrinsic` is supplied to the `Block`. +// ```ignore +// pub type Block = generic::Block; +// ``` +// So, if we do not create a trait outside of `Block` that doesn't have `Extrinsic`, we go into a +// recursive loop leading to a build error. +// +// Note that this is a workaround for a compiler bug and should be removed when the compiler +// bug is fixed. +#[doc(hidden)] +pub trait HeaderProvider { + /// Header type. + type HeaderT: Header; +} + /// Something which fulfills the abstract idea of a Substrate block. It has types for /// `Extrinsic` pieces of information as well as a `Header`. /// /// You can get an iterator over each of the `extrinsics` and retrieve the `header`. 
-pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + 'static { +pub trait Block: + HeaderProvider::Header> + + Clone + + Send + + Sync + + Codec + + Eq + + MaybeSerialize + + Debug + + 'static +{ /// Type for extrinsics. type Extrinsic: Member + Codec + Extrinsic + MaybeSerialize; /// Header type. - type Header: Header; + type Header: Header + MaybeSerializeDeserialize; /// Block hash type. - type Hash: Member - + MaybeSerializeDeserialize - + Debug - + sp_std::hash::Hash - + Ord - + Copy - + MaybeDisplay - + Default - + SimpleBitOps - + Codec - + AsRef<[u8]> - + AsMut<[u8]> - + TypeInfo; + type Hash: HashOutput; /// Returns a reference to the header. fn header(&self) -> &Self::Header; @@ -961,14 +1282,14 @@ pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + 'st /// Something that acts like an `Extrinsic`. pub trait Extrinsic: Sized { /// The function call. - type Call; + type Call: TypeInfo; /// The payload we carry for signed extrinsics. /// /// Usually it will contain a `Signature` and /// may include some additional data that are specific to signed /// extrinsics. - type SignaturePayload; + type SignaturePayload: SignaturePayload; /// Is this `Extrinsic` signed? /// If no information are available about signed/unsigned, `None` should be returned. @@ -987,6 +1308,31 @@ pub trait Extrinsic: Sized { } } +/// Something that acts like a [`SignaturePayload`](Extrinsic::SignaturePayload) of an +/// [`Extrinsic`]. +pub trait SignaturePayload { + /// The type of the address that signed the extrinsic. + /// + /// Particular to a signed extrinsic. + type SignatureAddress: TypeInfo; + + /// The signature type of the extrinsic. + /// + /// Particular to a signed extrinsic. + type Signature: TypeInfo; + + /// The additional data that is specific to the signed extrinsic. + /// + /// Particular to a signed extrinsic. 
+ type SignatureExtra: TypeInfo; +} + +impl SignaturePayload for () { + type SignatureAddress = (); + type Signature = (); + type SignatureExtra = (); +} + /// Implementor is an [`Extrinsic`] and provides metadata about this extrinsic. pub trait ExtrinsicMetadata { /// The format version of the `Extrinsic`. @@ -999,7 +1345,7 @@ pub trait ExtrinsicMetadata { } /// Extract the hashing type for a block. -pub type HashFor = <::Header as Header>::Hashing; +pub type HashingFor = <::Header as Header>::Hashing; /// Extract the number type for a block. pub type NumberFor = <::Header as Header>::Number; /// Extract the digest type for a block. @@ -1762,7 +2108,7 @@ macro_rules! impl_opaque_keys_inner { /// } /// ``` #[macro_export] -#[cfg(feature = "std")] +#[cfg(any(feature = "serde", feature = "std"))] macro_rules! impl_opaque_keys { { $( #[ $attr:meta ] )* @@ -1792,7 +2138,7 @@ macro_rules! impl_opaque_keys { } #[macro_export] -#[cfg(not(feature = "std"))] +#[cfg(all(not(feature = "std"), not(feature = "serde")))] #[doc(hidden)] macro_rules! impl_opaque_keys { { @@ -1953,11 +2299,27 @@ pub trait BlockNumberProvider { mod tests { use super::*; use crate::codec::{Decode, Encode, Input}; + #[cfg(feature = "bls-experimental")] + use sp_core::{bls377, bls381}; use sp_core::{ crypto::{Pair, UncheckedFrom}, - ecdsa, + ecdsa, ed25519, sr25519, }; + macro_rules! 
signature_verify_test { + ($algorithm:ident) => { + let msg = &b"test-message"[..]; + let wrong_msg = &b"test-msg"[..]; + let (pair, _) = $algorithm::Pair::generate(); + + let signature = pair.sign(&msg); + assert!($algorithm::Pair::verify(&signature, msg, &pair.public())); + + assert!(signature.verify(msg, &pair.public())); + assert!(!signature.verify(wrong_msg, &pair.public())); + }; + } + mod t { use sp_application_crypto::{app_crypto, sr25519}; use sp_core::crypto::KeyTypeId; @@ -2067,15 +2429,28 @@ mod tests { assert_eq!(buffer, [0, 0]); } + #[test] + fn ed25519_verify_works() { + signature_verify_test!(ed25519); + } + + #[test] + fn sr25519_verify_works() { + signature_verify_test!(sr25519); + } + #[test] fn ecdsa_verify_works() { - let msg = &b"test-message"[..]; - let (pair, _) = ecdsa::Pair::generate(); + signature_verify_test!(ecdsa); + } - let signature = pair.sign(&msg); - assert!(ecdsa::Pair::verify(&signature, msg, &pair.public())); + #[cfg(feature = "bls-experimental")] + fn bls377_verify_works() { + signature_verify_test!(bls377) + } - assert!(signature.verify(msg, &pair.public())); - assert!(signature.verify(msg, &pair.public())); + #[cfg(feature = "bls-experimental")] + fn bls381_verify_works() { + signature_verify_test!(bls381) } } diff --git a/primitives/session/Cargo.toml b/primitives/session/Cargo.toml index 6f362974b91b4..f547608fe51e7 100644 --- a/primitives/session/Cargo.toml +++ b/primitives/session/Cargo.toml @@ -13,13 +13,14 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } -sp-core = { version = "7.0.0", default-features = false, 
path = "../core" } -sp-runtime = { version = "7.0.0", optional = true, path = "../runtime" } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +sp-runtime = { version = "24.0.0", optional = true, path = "../runtime" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../staking" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } +sp-keystore = { version = "0.27.0", path = "../keystore", optional = true } [features] default = [ "std" ] @@ -31,4 +32,5 @@ std = [ "sp-runtime/std", "sp-staking/std", "sp-std/std", + "sp-keystore/std" ] diff --git a/primitives/session/src/lib.rs b/primitives/session/src/lib.rs index 642aa2a21143e..45395e9766f55 100644 --- a/primitives/session/src/lib.rs +++ b/primitives/session/src/lib.rs @@ -112,17 +112,22 @@ pub fn generate_initial_session_keys( client: std::sync::Arc, at: Block::Hash, seeds: Vec, + keystore: sp_keystore::KeystorePtr, ) -> Result<(), sp_api::ApiError> where Block: BlockT, T: ProvideRuntimeApi, T::Api: SessionKeys, { + use sp_api::ApiExt; + if seeds.is_empty() { return Ok(()) } - let runtime_api = client.runtime_api(); + let mut runtime_api = client.runtime_api(); + + runtime_api.register_extension(sp_keystore::KeystoreExt::from(keystore)); for seed in seeds { runtime_api.generate_session_keys(at, Some(seed.as_bytes().to_vec()))?; diff --git a/primitives/staking/Cargo.toml b/primitives/staking/Cargo.toml index f383a5e88759f..43ee307aa109a 100644 --- a/primitives/staking/Cargo.toml +++ b/primitives/staking/Cargo.toml @@ -13,21 +13,25 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.136", optional = true } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +serde = { version = "1.0.163", default-features = false, features = ["derive", "alloc"], optional = true 
} +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +impl-trait-for-tuples = "0.2.2" + +sp-core = { default-features = false, path = "../core" } +sp-runtime = { default-features = false, path = "../runtime" } +sp-std = { default-features = false, path = "../std" } [features] default = ["std"] std = [ - "serde", + "serde/std", "codec/std", "scale-info/std", "sp-core/std", "sp-runtime/std", "sp-std/std", ] -runtime-benchmarks = [] +runtime-benchmarks = [ + "sp-runtime/runtime-benchmarks" +] diff --git a/primitives/staking/src/currency_to_vote.rs b/primitives/staking/src/currency_to_vote.rs new file mode 100644 index 0000000000000..556e5bd210426 --- /dev/null +++ b/primitives/staking/src/currency_to_vote.rs @@ -0,0 +1,101 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use sp_runtime::{ + traits::{UniqueSaturatedFrom, UniqueSaturatedInto}, + SaturatedConversion, +}; + +/// A trait similar to `Convert` to convert values from `B` an abstract balance type +/// into u64 and back from u128. (This conversion is used in election and other places where complex +/// calculation over balance type is needed) +/// +/// Total issuance of the currency is passed in, but an implementation of this trait may or may not +/// use it. +/// +/// # WARNING +/// +/// the total issuance being passed in implies that the implementation must be aware of the fact +/// that its values can affect the outcome. This implies that if the vote value is dependent on the +/// total issuance, it should never ber written to storage for later re-use. +pub trait CurrencyToVote { + /// Convert balance to u64. + fn to_vote(value: B, issuance: B) -> u64; + + /// Convert u128 to balance. + fn to_currency(value: u128, issuance: B) -> B; +} + +/// An implementation of `CurrencyToVote` tailored for chain's that have a balance type of u128. +/// +/// The factor is the `(total_issuance / u64::MAX).max(1)`, represented as u64. Let's look at the +/// important cases: +/// +/// If the chain's total issuance is less than u64::MAX, this will always be 1, which means that +/// the factor will not have any effect. In this case, any account's balance is also less. Thus, +/// both of the conversions are basically an `as`; Any balance can fit in u64. +/// +/// If the chain's total issuance is more than 2*u64::MAX, then a factor might be multiplied and +/// divided upon conversion. 
+pub struct U128CurrencyToVote; + +impl U128CurrencyToVote { + fn factor(issuance: u128) -> u128 { + (issuance / u64::MAX as u128).max(1) + } +} + +impl CurrencyToVote for U128CurrencyToVote { + fn to_vote(value: u128, issuance: u128) -> u64 { + (value / Self::factor(issuance)).saturated_into() + } + + fn to_currency(value: u128, issuance: u128) -> u128 { + value.saturating_mul(Self::factor(issuance)) + } +} + +/// A naive implementation of `CurrencyConvert` that simply saturates all conversions. +/// +/// # Warning +/// +/// This is designed to be used mostly for testing. Use with care, and think about the consequences. +pub struct SaturatingCurrencyToVote; + +impl + UniqueSaturatedFrom> CurrencyToVote + for SaturatingCurrencyToVote +{ + fn to_vote(value: B, _: B) -> u64 { + value.unique_saturated_into() + } + + fn to_currency(value: u128, _: B) -> B { + B::unique_saturated_from(value) + } +} + +#[cfg(feature = "std")] +impl + UniqueSaturatedFrom> CurrencyToVote for () { + fn to_vote(value: B, issuance: B) -> u64 { + SaturatingCurrencyToVote::to_vote(value, issuance) + } + + /// Convert u128 to balance. + fn to_currency(value: u128, issuance: B) -> B { + SaturatingCurrencyToVote::to_currency(value, issuance) + } +} diff --git a/primitives/staking/src/lib.rs b/primitives/staking/src/lib.rs index 57128bd327d9e..1621af164b375 100644 --- a/primitives/staking/src/lib.rs +++ b/primitives/staking/src/lib.rs @@ -20,13 +20,17 @@ //! A crate which contains primitives that are useful for implementation that uses staking //! approaches in general. Definitions related to sessions, slashing, etc go here. 
+use crate::currency_to_vote::CurrencyToVote; +use codec::{FullCodec, MaxEncodedLen}; use scale_info::TypeInfo; use sp_core::RuntimeDebug; -use sp_runtime::{DispatchError, DispatchResult}; -use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; +use sp_runtime::{DispatchError, DispatchResult, Saturating}; +use sp_std::{collections::btree_map::BTreeMap, ops::Sub, vec::Vec}; pub mod offence; +pub mod currency_to_vote; + /// Simple index type with which we can count sessions. pub type SessionIndex = u32; @@ -35,7 +39,7 @@ pub type EraIndex = u32; /// Representation of the status of a staker. #[derive(RuntimeDebug, TypeInfo)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize, PartialEq, Eq, Clone))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize, PartialEq, Eq, Clone))] pub enum StakerStatus { /// Chilling. Idle, @@ -45,32 +49,9 @@ pub enum StakerStatus { Nominator(Vec), } -/// Trait describing something that implements a hook for any operations to perform when a staker is -/// slashed. -pub trait OnStakerSlash { - /// A hook for any operations to perform when a staker is slashed. - /// - /// # Arguments - /// - /// * `stash` - The stash of the staker whom the slash was applied to. - /// * `slashed_active` - The new bonded balance of the staker after the slash was applied. - /// * `slashed_unlocking` - A map of slashed eras, and the balance of that unlocking chunk after - /// the slash is applied. Any era not present in the map is not affected at all. - fn on_slash( - stash: &AccountId, - slashed_active: Balance, - slashed_unlocking: &BTreeMap, - ); -} - -impl OnStakerSlash for () { - fn on_slash(_: &AccountId, _: Balance, _: &BTreeMap) { - // Nothing to do here - } -} - /// A struct that reflects stake that an account has in the staking system. Provides a set of /// methods to operate on it's properties. Aimed at making `StakingInterface` more concise. 
+#[derive(RuntimeDebug, Clone, Copy, Eq, PartialEq, Default)] pub struct Stake { /// The total stake that `stash` has in the staking system. This includes the /// `active` stake, and any funds currently in the process of unbonding via @@ -87,16 +68,88 @@ pub struct Stake { pub active: Balance, } +/// A generic staking event listener. +/// +/// Note that the interface is designed in a way that the events are fired post-action, so any +/// pre-action data that is needed needs to be passed to interface methods. The rest of the data can +/// be retrieved by using `StakingInterface`. +#[impl_trait_for_tuples::impl_for_tuples(10)] +pub trait OnStakingUpdate { + /// Fired when the stake amount of someone updates. + /// + /// This is effectively any changes to the bond amount, such as bonding more funds, and + /// unbonding. + fn on_stake_update(_who: &AccountId, _prev_stake: Option>) {} + + /// Fired when someone sets their intention to nominate. + /// + /// This should never be fired for existing nominators. + fn on_nominator_add(_who: &AccountId) {} + + /// Fired when an existing nominator updates their nominations. + /// + /// Note that this is not fired when a nominator changes their stake. For that, + /// `on_stake_update` should be used, followed by querying whether `who` was a validator or a + /// nominator. + fn on_nominator_update(_who: &AccountId, _prev_nominations: Vec) {} + + /// Fired when someone removes their intention to nominate, either due to chill or validating. + /// + /// The set of nominations at the time of removal is provided as it can no longer be fetched in + /// any way. + fn on_nominator_remove(_who: &AccountId, _nominations: Vec) {} + + /// Fired when someone sets their intention to validate. + /// + /// Note validator preference changes are not communicated, but could be added if needed. + fn on_validator_add(_who: &AccountId) {} + + /// Fired when an existing validator updates their preferences. 
+ /// + /// Note validator preference changes are not communicated, but could be added if needed. + fn on_validator_update(_who: &AccountId) {} + + /// Fired when someone removes their intention to validate, either due to chill or nominating. + fn on_validator_remove(_who: &AccountId) {} + + /// Fired when someone is fully unstaked. + fn on_unstake(_who: &AccountId) {} + + /// Fired when a staker is slashed. + /// + /// * `stash` - The stash of the staker whom the slash was applied to. + /// * `slashed_active` - The new bonded balance of the staker after the slash was applied. + /// * `slashed_unlocking` - A map of slashed eras, and the balance of that unlocking chunk after + /// the slash is applied. Any era not present in the map is not affected at all. + fn on_slash( + _stash: &AccountId, + _slashed_active: Balance, + _slashed_unlocking: &BTreeMap, + ) { + } +} + /// A generic representation of a staking implementation. /// /// This interface uses the terminology of NPoS, but it is aims to be generic enough to cover other /// implementations as well. pub trait StakingInterface { /// Balance type used by the staking system. - type Balance: PartialEq; - - /// AccountId type used by the staking system - type AccountId; + type Balance: Sub + + Ord + + PartialEq + + Default + + Copy + + MaxEncodedLen + + FullCodec + + TypeInfo + + Saturating; + + /// AccountId type used by the staking system. + type AccountId: Clone + sp_std::fmt::Debug; + + /// Means of converting Currency to VoteWeight. + type CurrencyToVote: CurrencyToVote; /// The minimum amount required to bond in order to set nomination intentions. This does not /// necessarily mean the nomination will be counted in an election, but instead just enough to @@ -195,8 +248,12 @@ pub trait StakingInterface { /// Return the status of the given staker, `None` if not staked at all. fn status(who: &Self::AccountId) -> Result, DispatchError>; + /// Checks whether or not this is a validator account. 
+ fn is_validator(who: &Self::AccountId) -> bool { + Self::status(who).map(|s| matches!(s, StakerStatus::Validator)).unwrap_or(false) + } + /// Get the nominations of a stash, if they are a nominator, `None` otherwise. - #[cfg(feature = "runtime-benchmarks")] fn nominations(who: &Self::AccountId) -> Option> { match Self::status(who) { Ok(StakerStatus::Nominator(t)) => Some(t), diff --git a/primitives/staking/src/offence.rs b/primitives/staking/src/offence.rs index 6694c9055d4ff..8013166374e06 100644 --- a/primitives/staking/src/offence.rs +++ b/primitives/staking/src/offence.rs @@ -220,16 +220,16 @@ pub struct OffenceDetails { /// for a typical usage scenario: /// /// 1. An offence is detected and an evidence is submitted on-chain via the -/// [`OffenceReportSystem::publish_evidence`] method. This will construct -/// and submit an extrinsic transaction containing the offence evidence. +/// [`OffenceReportSystem::publish_evidence`] method. This will construct and submit an extrinsic +/// transaction containing the offence evidence. /// -/// 2. If the extrinsic is unsigned then the transaction receiver may want to -/// perform some preliminary checks before further processing. This is a good -/// place to call the [`OffenceReportSystem::check_evidence`] method. +/// 2. If the extrinsic is unsigned then the transaction receiver may want to perform some +/// preliminary checks before further processing. This is a good place to call the +/// [`OffenceReportSystem::check_evidence`] method. /// -/// 3. Finally the report extrinsic is executed on-chain. This is where the user -/// calls the [`OffenceReportSystem::process_evidence`] to consume the offence -/// report and enact any required action. +/// 3. Finally the report extrinsic is executed on-chain. This is where the user calls the +/// [`OffenceReportSystem::process_evidence`] to consume the offence report and enact any +/// required action. 
pub trait OffenceReportSystem { /// Longevity, in blocks, for the evidence report validity. /// diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index cc780e51abcaf..32be8e518f49f 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-state-machine" -version = "0.13.0" +version = "0.28.0" authors = ["Parity Technologies "] description = "Substrate State Machine" edition = "2021" @@ -14,26 +14,26 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } hash-db = { version = "0.16.0", default-features = false } log = { version = "0.4.17", default-features = false } parking_lot = { version = "0.12.1", optional = true } rand = { version = "0.8.5", optional = true } -smallvec = "1.8.0" +smallvec = "1.11.0" thiserror = { version = "1.0.30", optional = true } tracing = { version = "0.1.29", optional = true } -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-externalities = { version = "0.13.0", default-features = false, path = "../externalities" } -sp-panic-handler = { version = "5.0.0", optional = true, path = "../panic-handler" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } -sp-trie = { version = "7.0.0", default-features = false, path = "../trie" } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +sp-externalities = { version = "0.19.0", default-features = false, path = "../externalities" } +sp-panic-handler = { version = "8.0.0", optional = true, path = "../panic-handler" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } +sp-trie = { version = "22.0.0", default-features = false, path = "../trie" } +trie-db = { version = "0.27.1", default-features = 
false } [dev-dependencies] -array-bytes = "4.1" +array-bytes = "6.1" pretty_assertions = "1.2.1" rand = "0.8.5" -sp-runtime = { version = "7.0.0", path = "../runtime" } -trie-db = "0.27.1" +sp-runtime = { version = "24.0.0", path = "../runtime" } assert_matches = "1.5" [features] @@ -49,6 +49,8 @@ std = [ "sp-panic-handler", "sp-std/std", "sp-trie/std", + "trie-db/std", "thiserror", "tracing", + "sp-runtime/std" ] diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index f3244308a54cf..2a25bdc54d949 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -30,6 +30,7 @@ use sp_core::storage::{ChildInfo, StateVersion, TrackedStorageKey}; #[cfg(feature = "std")] use sp_core::traits::RuntimeCode; use sp_std::vec::Vec; +use sp_trie::PrefixedMemoryDB; /// A struct containing arguments for iterating over the storage. #[derive(Default)] @@ -168,6 +169,12 @@ where } } +/// The transaction type used by [`Backend`]. +/// +/// This transaction contains all the changes that need to be applied to the backend to create the +/// state for a new block. +pub type BackendTransaction = PrefixedMemoryDB; + /// A state backend is used to read state data and can have changes committed /// to it. /// @@ -176,11 +183,8 @@ pub trait Backend: sp_std::fmt::Debug { /// An error type when fetching data is not possible. type Error: super::Error; - /// Storage changes to be applied if committing - type Transaction: Consolidate + Default + Send; - /// Type of trie backend storage. - type TrieBackendStorage: TrieBackendStorage; + type TrieBackendStorage: TrieBackendStorage; /// Type of the raw storage iterator. 
type RawIter: StorageIterator; @@ -236,7 +240,7 @@ pub trait Backend: sp_std::fmt::Debug { &self, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, Self::Transaction) + ) -> (H::Out, BackendTransaction) where H::Out: Ord; @@ -248,7 +252,7 @@ pub trait Backend: sp_std::fmt::Debug { child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, bool, Self::Transaction) + ) -> (H::Out, bool, BackendTransaction) where H::Out: Ord; @@ -283,11 +287,11 @@ pub trait Backend: sp_std::fmt::Debug { Item = (&'a ChildInfo, impl Iterator)>), >, state_version: StateVersion, - ) -> (H::Out, Self::Transaction) + ) -> (H::Out, BackendTransaction) where H::Out: Ord + Encode, { - let mut txs: Self::Transaction = Default::default(); + let mut txs = BackendTransaction::default(); let mut child_roots: Vec<_> = Default::default(); // child first for (child_info, child_delta) in child_deltas { @@ -308,6 +312,7 @@ pub trait Backend: sp_std::fmt::Debug { state_version, ); txs.consolidate(parent_txs); + (root, txs) } @@ -331,7 +336,7 @@ pub trait Backend: sp_std::fmt::Debug { fn commit( &self, _: H::Out, - _: Self::Transaction, + _: BackendTransaction, _: StorageCollection, _: ChildStorageCollection, ) -> Result<(), Self::Error> { @@ -377,34 +382,6 @@ pub trait AsTrieBackend> { fn as_trie_backend(&self) -> &TrieBackend; } -/// Trait that allows consolidate two transactions together. -pub trait Consolidate { - /// Consolidate two transactions into one. 
- fn consolidate(&mut self, other: Self); -} - -impl Consolidate for () { - fn consolidate(&mut self, _: Self) { - () - } -} - -impl Consolidate for Vec<(Option, StorageCollection)> { - fn consolidate(&mut self, mut other: Self) { - self.append(&mut other); - } -} - -impl Consolidate for sp_trie::GenericMemoryDB -where - H: Hasher, - KF: sp_trie::KeyFunction, -{ - fn consolidate(&mut self, other: Self) { - sp_trie::GenericMemoryDB::consolidate(self, other) - } -} - /// Wrapper to create a [`RuntimeCode`] from a type that implements [`Backend`]. #[cfg(feature = "std")] pub struct BackendRuntimeCode<'a, B, H> { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index a7adbc8a0daee..ace88aee2628f 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -29,7 +29,7 @@ use sp_core::{ Blake2Hasher, }; use sp_externalities::{Extension, Extensions, MultiRemovalResults}; -use sp_trie::{empty_child_trie_root, HashKey, LayoutV0, LayoutV1, TrieConfiguration}; +use sp_trie::{empty_child_trie_root, LayoutV0, LayoutV1, TrieConfiguration}; use std::{ any::{Any, TypeId}, collections::BTreeMap, @@ -39,7 +39,7 @@ use std::{ /// Simple Map-based Externalities impl. 
#[derive(Debug)] pub struct BasicExternalities { - overlay: OverlayedChanges, + overlay: OverlayedChanges, extensions: Extensions, } @@ -282,7 +282,7 @@ impl Externalities for BasicExternalities { if let Some((data, child_info)) = self.overlay.child_changes(child_info.storage_key()) { let delta = data.into_iter().map(|(k, v)| (k.as_ref(), v.value().map(|v| v.as_slice()))); - crate::in_memory_backend::new_in_mem::>() + crate::in_memory_backend::new_in_mem::() .child_storage_root(&child_info, delta, state_version) .0 } else { diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 3c088a2176582..11df46f2a4a3a 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -22,7 +22,7 @@ use crate::overlayed_changes::OverlayedExtensions; use crate::{ backend::Backend, IndexOperation, IterArgs, OverlayedChanges, StorageKey, StorageValue, }; -use codec::{Decode, Encode, EncodeAppend}; +use codec::{Encode, EncodeAppend}; use hash_db::Hasher; #[cfg(feature = "std")] use sp_core::hexdisplay::HexDisplay; @@ -30,9 +30,8 @@ use sp_core::storage::{ well_known_keys::is_child_storage_key, ChildInfo, StateVersion, TrackedStorageKey, }; use sp_externalities::{Extension, ExtensionStore, Externalities, MultiRemovalResults}; -use sp_trie::{empty_child_trie_root, LayoutV1}; -use crate::{log_error, trace, warn, StorageTransactionCache}; +use crate::{log_error, trace, warn}; use sp_std::{ any::{Any, TypeId}, boxed::Box, @@ -98,11 +97,9 @@ where B: 'a + Backend, { /// The overlayed changes to write to. - overlay: &'a mut OverlayedChanges, + overlay: &'a mut OverlayedChanges, /// The storage backend to read from. backend: &'a B, - /// The cache for the storage transactions. - storage_transaction_cache: &'a mut StorageTransactionCache, /// Pseudo-unique id used for tracing. pub id: u16, /// Extensions registered with this instance. @@ -117,37 +114,24 @@ where { /// Create a new `Ext`. 
#[cfg(not(feature = "std"))] - pub fn new( - overlay: &'a mut OverlayedChanges, - storage_transaction_cache: &'a mut StorageTransactionCache, - backend: &'a B, - ) -> Self { - Ext { overlay, backend, id: 0, storage_transaction_cache } + pub fn new(overlay: &'a mut OverlayedChanges, backend: &'a B) -> Self { + Ext { overlay, backend, id: 0 } } /// Create a new `Ext` from overlayed changes and read-only backend #[cfg(feature = "std")] pub fn new( - overlay: &'a mut OverlayedChanges, - storage_transaction_cache: &'a mut StorageTransactionCache, + overlay: &'a mut OverlayedChanges, backend: &'a B, extensions: Option<&'a mut sp_externalities::Extensions>, ) -> Self { Self { overlay, backend, - storage_transaction_cache, id: rand::random(), extensions: extensions.map(OverlayedExtensions::new), } } - - /// Invalidates the currently cached storage root and the db transaction. - /// - /// Called when there are changes that likely will invalidate the storage root. - fn mark_dirty(&mut self) { - self.storage_transaction_cache.reset(); - } } #[cfg(test)] @@ -412,7 +396,6 @@ where ), ); - self.mark_dirty(); self.overlay.set_storage(key, value); } @@ -432,7 +415,6 @@ where ); let _guard = guard(); - self.mark_dirty(); self.overlay.set_child_storage(child_info, key, value); } @@ -449,7 +431,6 @@ where child_info = %HexDisplay::from(&child_info.storage_key()), ); let _guard = guard(); - self.mark_dirty(); let overlay = self.overlay.clear_child_storage(child_info); let (maybe_cursor, backend, loops) = self.limit_remove_from_backend(Some(child_info), None, maybe_limit, maybe_cursor); @@ -478,7 +459,6 @@ where return MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } } - self.mark_dirty(); let overlay = self.overlay.clear_prefix(prefix); let (maybe_cursor, backend, loops) = self.limit_remove_from_backend(None, Some(prefix), maybe_limit, maybe_cursor); @@ -501,7 +481,6 @@ where ); let _guard = guard(); - self.mark_dirty(); let overlay = 
self.overlay.clear_child_prefix(child_info, prefix); let (maybe_cursor, backend, loops) = self.limit_remove_from_backend( Some(child_info), @@ -522,7 +501,6 @@ where ); let _guard = guard(); - self.mark_dirty(); let backend = &mut self.backend; let current_value = self.overlay.value_mut_or_insert_with(&key, || { @@ -533,27 +511,17 @@ where fn storage_root(&mut self, state_version: StateVersion) -> Vec { let _guard = guard(); - if let Some(ref root) = self.storage_transaction_cache.transaction_storage_root { - trace!( - target: "state", - method = "StorageRoot", - ext_id = %HexDisplay::from(&self.id.to_le_bytes()), - storage_root = %HexDisplay::from(&root.as_ref()), - cached = true, - ); - return root.encode() - } - let root = - self.overlay - .storage_root(self.backend, self.storage_transaction_cache, state_version); + let (root, _cached) = self.overlay.storage_root(self.backend, state_version); + trace!( target: "state", method = "StorageRoot", ext_id = %HexDisplay::from(&self.id.to_le_bytes()), storage_root = %HexDisplay::from(&root.as_ref()), - cached = false, + cached = %_cached, ); + root.encode() } @@ -563,74 +531,22 @@ where state_version: StateVersion, ) -> Vec { let _guard = guard(); - let storage_key = child_info.storage_key(); - let prefixed_storage_key = child_info.prefixed_storage_key(); - if self.storage_transaction_cache.transaction_storage_root.is_some() { - let root = self - .storage(prefixed_storage_key.as_slice()) - .and_then(|k| Decode::decode(&mut &k[..]).ok()) - // V1 is equivalent to V0 on empty root. 
- .unwrap_or_else(empty_child_trie_root::>); - trace!( - target: "state", - method = "ChildStorageRoot", - ext_id = %HexDisplay::from(&self.id.to_le_bytes()), - child_info = %HexDisplay::from(&storage_key), - storage_root = %HexDisplay::from(&root.as_ref()), - cached = true, - ); - root.encode() - } else { - let root = if let Some((changes, info)) = self.overlay.child_changes(storage_key) { - let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); - Some(self.backend.child_storage_root(info, delta, state_version)) - } else { - None - }; - - if let Some((root, is_empty, _)) = root { - let root = root.encode(); - // We store update in the overlay in order to be able to use - // 'self.storage_transaction' cache. This is brittle as it rely on Ext only querying - // the trie backend for storage root. - // A better design would be to manage 'child_storage_transaction' in a - // similar way as 'storage_transaction' but for each child trie. - if is_empty { - self.overlay.set_storage(prefixed_storage_key.into_inner(), None); - } else { - self.overlay.set_storage(prefixed_storage_key.into_inner(), Some(root.clone())); - } - trace!( - target: "state", - method = "ChildStorageRoot", - ext_id = %HexDisplay::from(&self.id.to_le_bytes()), - child_info = %HexDisplay::from(&storage_key), - storage_root = %HexDisplay::from(&root.as_ref()), - cached = false, - ); + let (root, _cached) = self + .overlay + .child_storage_root(child_info, self.backend, state_version) + .expect(EXT_NOT_ALLOWED_TO_FAIL); - root - } else { - // empty overlay - let root = self - .storage(prefixed_storage_key.as_slice()) - .and_then(|k| Decode::decode(&mut &k[..]).ok()) - // V1 is equivalent to V0 on empty root. 
- .unwrap_or_else(empty_child_trie_root::>); - - trace!( - target: "state", - method = "ChildStorageRoot", - ext_id = %HexDisplay::from(&self.id.to_le_bytes()), - child_info = %HexDisplay::from(&storage_key), - storage_root = %HexDisplay::from(&root.as_ref()), - cached = false, - ); + trace!( + target: "state", + method = "ChildStorageRoot", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), + storage_root = %HexDisplay::from(&root.as_ref()), + cached = %_cached, + ); - root.encode() - } - } + root.encode() } fn storage_index_transaction(&mut self, index: u32, hash: &[u8], size: u32) { @@ -669,7 +585,6 @@ where } fn storage_rollback_transaction(&mut self) -> Result<(), ()> { - self.mark_dirty(); self.overlay.rollback_transaction().map_err(|_| ()) } @@ -682,14 +597,9 @@ where self.overlay.rollback_transaction().expect(BENCHMARKING_FN); } self.overlay - .drain_storage_changes( - self.backend, - self.storage_transaction_cache, - Default::default(), // using any state - ) + .drain_storage_changes(self.backend, Default::default()) .expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL); - self.mark_dirty(); self.overlay .enter_runtime() .expect("We have reset the overlay above, so we can not be in the runtime; qed"); @@ -703,7 +613,7 @@ where } let changes = self .overlay - .drain_storage_changes(self.backend, self.storage_transaction_cache, state_version) + .drain_storage_changes(self.backend, state_version) .expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend .commit( @@ -713,7 +623,6 @@ where changes.child_storage_changes, ) .expect(EXT_NOT_ALLOWED_TO_FAIL); - self.mark_dirty(); self.overlay .enter_runtime() .expect("We have reset the overlay above, so we can not be in the runtime; qed"); @@ -914,7 +823,7 @@ where mod tests { use super::*; use crate::InMemoryBackend; - use codec::Encode; + use codec::{Decode, Encode}; use sp_core::{ map, storage::{Storage, StorageChild}, @@ -926,7 
+835,6 @@ mod tests { #[test] fn next_storage_key_works() { - let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); overlay.set_storage(vec![20], None); overlay.set_storage(vec![30], Some(vec![31])); @@ -943,7 +851,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); + let ext = TestExt::new(&mut overlay, &backend, None); // next_backend < next_overlay assert_eq!(ext.next_storage_key(&[5]), Some(vec![10])); @@ -959,7 +867,7 @@ mod tests { drop(ext); overlay.set_storage(vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); + let ext = TestExt::new(&mut overlay, &backend, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_storage_key(&[40]), Some(vec![50])); @@ -967,7 +875,6 @@ mod tests { #[test] fn next_storage_key_works_with_a_lot_empty_values_in_overlay() { - let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); overlay.set_storage(vec![20], None); overlay.set_storage(vec![21], None); @@ -990,7 +897,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); + let ext = TestExt::new(&mut overlay, &backend, None); assert_eq!(ext.next_storage_key(&[5]), Some(vec![30])); @@ -1002,7 +909,6 @@ mod tests { let child_info = ChildInfo::new_default(b"Child1"); let child_info = &child_info; - let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); @@ -1024,7 +930,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); + let ext = TestExt::new(&mut overlay, &backend, None); // next_backend < next_overlay assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10])); @@ -1040,7 +946,7 @@ mod tests { drop(ext); 
overlay.set_child_storage(child_info, vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); + let ext = TestExt::new(&mut overlay, &backend, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50])); @@ -1050,7 +956,6 @@ mod tests { fn child_storage_works() { let child_info = ChildInfo::new_default(b"Child1"); let child_info = &child_info; - let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); @@ -1072,7 +977,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); + let ext = TestExt::new(&mut overlay, &backend, None); assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10])); assert_eq!( @@ -1094,7 +999,6 @@ mod tests { fn clear_prefix_cannot_delete_a_child_root() { let child_info = ChildInfo::new_default(b"Child1"); let child_info = &child_info; - let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); let backend = ( Storage { @@ -1112,7 +1016,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); + let ext = TestExt::new(&mut overlay, &backend, None); use sp_core::storage::well_known_keys; let mut ext = ext; diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 2c3ed7441501c..ce551cec2a473 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -24,36 +24,22 @@ use crate::{ use codec::Codec; use hash_db::Hasher; use sp_core::storage::{ChildInfo, StateVersion, Storage}; -use sp_trie::{empty_trie_root, GenericMemoryDB, HashKey, KeyFunction, LayoutV1, MemoryDB}; +use sp_trie::{empty_trie_root, LayoutV1, PrefixedMemoryDB}; use 
std::collections::{BTreeMap, HashMap}; /// Create a new empty instance of in-memory backend. -/// -/// It will use [`HashKey`] to store the keys internally. -pub fn new_in_mem_hash_key() -> TrieBackend, H> +pub fn new_in_mem() -> TrieBackend, H> where H: Hasher, H::Out: Codec + Ord, { - new_in_mem::>() -} - -/// Create a new empty instance of in-memory backend. -pub fn new_in_mem() -> TrieBackend, H> -where - H: Hasher, - H::Out: Codec + Ord, - KF: KeyFunction + Send + Sync, -{ - let db = GenericMemoryDB::default(); // V1 is same as V0 for an empty trie. - TrieBackendBuilder::new(db, empty_trie_root::>()).build() + TrieBackendBuilder::new(Default::default(), empty_trie_root::>()).build() } -impl TrieBackend, H> +impl TrieBackend, H> where H::Out: Codec + Ord, - KF: KeyFunction + Send + Sync, { /// Copy the state, with applied updates pub fn update, StorageCollection)>>( @@ -85,15 +71,16 @@ where } /// Merge trie nodes into this backend. - pub fn update_backend(&self, root: H::Out, changes: GenericMemoryDB) -> Self { + pub fn update_backend(&self, root: H::Out, changes: PrefixedMemoryDB) -> Self { let mut clone = self.backend_storage().clone(); clone.consolidate(changes); TrieBackendBuilder::new(clone, root).build() } /// Apply the given transaction to this backend and set the root to the given value. 
- pub fn apply_transaction(&mut self, root: H::Out, transaction: GenericMemoryDB) { + pub fn apply_transaction(&mut self, root: H::Out, transaction: PrefixedMemoryDB) { let mut storage = sp_std::mem::take(self).into_storage(); + storage.consolidate(transaction); *self = TrieBackendBuilder::new(storage, root).build(); } @@ -104,33 +91,29 @@ where } } -impl Clone for TrieBackend, H> +impl Clone for TrieBackend, H> where H::Out: Codec + Ord, - KF: KeyFunction + Send + Sync, { fn clone(&self) -> Self { TrieBackendBuilder::new(self.backend_storage().clone(), *self.root()).build() } } -impl Default for TrieBackend, H> +impl Default for TrieBackend, H> where H: Hasher, H::Out: Codec + Ord, - KF: KeyFunction + Send + Sync, { fn default() -> Self { new_in_mem() } } -impl - From<(HashMap, BTreeMap>, StateVersion)> - for TrieBackend, H> +impl From<(HashMap, BTreeMap>, StateVersion)> + for TrieBackend, H> where H::Out: Codec + Ord, - KF: KeyFunction + Send + Sync, { fn from( (inner, state_version): ( @@ -149,10 +132,9 @@ where } } -impl From<(Storage, StateVersion)> for TrieBackend, H> +impl From<(Storage, StateVersion)> for TrieBackend, H> where H::Out: Codec + Ord, - KF: KeyFunction + Send + Sync, { fn from((inners, state_version): (Storage, StateVersion)) -> Self { let mut inner: HashMap, BTreeMap> = inners @@ -165,11 +147,10 @@ where } } -impl From<(BTreeMap, StateVersion)> - for TrieBackend, H> +impl From<(BTreeMap, StateVersion)> + for TrieBackend, H> where H::Out: Codec + Ord, - KF: KeyFunction + Send + Sync, { fn from((inner, state_version): (BTreeMap, StateVersion)) -> Self { let mut expanded = HashMap::new(); @@ -178,11 +159,10 @@ where } } -impl From<(Vec<(Option, StorageCollection)>, StateVersion)> - for TrieBackend, H> +impl From<(Vec<(Option, StorageCollection)>, StateVersion)> + for TrieBackend, H> where H::Out: Codec + Ord, - KF: KeyFunction + Send + Sync, { fn from( (inner, state_version): (Vec<(Option, StorageCollection)>, StateVersion), @@ -212,7 +192,7 @@ 
mod tests { #[test] fn in_memory_with_child_trie_only() { let state_version = StateVersion::default(); - let storage = new_in_mem_hash_key::(); + let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; let storage = storage.update( @@ -228,7 +208,7 @@ mod tests { #[test] fn insert_multiple_times_child_data_works() { let state_version = StateVersion::default(); - let mut storage = new_in_mem_hash_key::(); + let mut storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); storage.insert( diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 0001d0026c394..0e2b9bfdfffcf 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -36,6 +36,8 @@ mod testing; mod trie_backend; mod trie_backend_essence; +pub use trie_backend::TrieCacheProvider; + #[cfg(feature = "std")] pub use std_reexport::*; @@ -123,13 +125,13 @@ impl sp_std::fmt::Display for DefaultError { } pub use crate::{ - backend::{Backend, IterArgs, KeysIter, PairsIter, StorageIterator}, + backend::{Backend, BackendTransaction, IterArgs, KeysIter, PairsIter, StorageIterator}, error::{Error, ExecutionError}, ext::Ext, overlayed_changes::{ ChildStorageCollection, IndexOperation, OffchainChangesCollection, OffchainOverlayedChanges, OverlayedChanges, StorageChanges, StorageCollection, StorageKey, - StorageTransactionCache, StorageValue, + StorageValue, }, stats::{StateMachineStats, UsageInfo, UsageUnit}, trie_backend::{TrieBackend, TrieBackendBuilder}, @@ -141,7 +143,7 @@ mod std_reexport { pub use crate::{ basic::BasicExternalities, error::{Error, ExecutionError}, - in_memory_backend::{new_in_mem, new_in_mem_hash_key}, + in_memory_backend::new_in_mem, read_only::{InspectState, ReadOnlyExternalities}, testing::TestExternalities, trie_backend::create_proof_check_backend, @@ -166,14 +168,8 @@ mod execution { traits::{CallContext, CodeExecutor, RuntimeCode}, }; use 
sp_externalities::Extensions; - use std::{ - collections::{HashMap, HashSet}, - fmt, - }; - - const PROOF_CLOSE_TRANSACTION: &str = "\ - Closing a transaction that was started in this function. Client initiated transactions - are protected from being closed by the runtime. qed"; + use sp_trie::PrefixedMemoryDB; + use std::collections::{HashMap, HashSet}; pub(crate) type CallResult = Result, E>; @@ -181,22 +177,7 @@ mod execution { pub type DefaultHandler = fn(CallResult, CallResult) -> CallResult; /// Trie backend with in-memory storage. - pub type InMemoryBackend = TrieBackend, H>; - - /// Strategy for executing a call into the runtime. - #[derive(Copy, Clone, Eq, PartialEq, Debug)] - pub enum ExecutionStrategy { - /// Execute with the native equivalent if it is compatible with the given wasm module; - /// otherwise fall back to the wasm. - NativeWhenPossible, - /// Use the given wasm module. - AlwaysWasm, - /// Run with both the wasm and the native variant (if compatible). Report any discrepancy - /// as an error. - Both, - /// First native, then if that fails or is not possible, wasm. - NativeElseWasm, - } + pub type InMemoryBackend = TrieBackend, H>; /// Storage backend trust level. #[derive(Debug, Clone)] @@ -209,73 +190,6 @@ mod execution { Untrusted, } - /// Like `ExecutionStrategy` only it also stores a handler in case of consensus failure. - #[derive(Clone)] - pub enum ExecutionManager { - /// Execute with the native equivalent if it is compatible with the given wasm module; - /// otherwise fall back to the wasm. - NativeWhenPossible, - /// Use the given wasm module. The backend on which code is executed code could be - /// trusted to provide all storage or not (i.e. the light client cannot be trusted to - /// provide for all storage queries since the storage entries it has come from an external - /// node). - AlwaysWasm(BackendTrustLevel), - /// Run with both the wasm and the native variant (if compatible). Call `F` in the case of - /// any discrepancy. 
- Both(F), - /// First native, then if that fails or is not possible, wasm. - NativeElseWasm, - } - - impl<'a, F> From<&'a ExecutionManager> for ExecutionStrategy { - fn from(s: &'a ExecutionManager) -> Self { - match *s { - ExecutionManager::NativeWhenPossible => ExecutionStrategy::NativeWhenPossible, - ExecutionManager::AlwaysWasm(_) => ExecutionStrategy::AlwaysWasm, - ExecutionManager::NativeElseWasm => ExecutionStrategy::NativeElseWasm, - ExecutionManager::Both(_) => ExecutionStrategy::Both, - } - } - } - - impl ExecutionStrategy { - /// Gets the corresponding manager for the execution strategy. - pub fn get_manager(self) -> ExecutionManager> { - match self { - ExecutionStrategy::AlwaysWasm => - ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted), - ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible, - ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm, - ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| { - warn!( - "Consensus error between wasm {:?} and native {:?}. Using wasm.", - wasm_result, native_result, - ); - warn!(" Native result {:?}", native_result); - warn!(" Wasm result {:?}", wasm_result); - wasm_result - }), - } - } - } - - /// Evaluate to ExecutionManager::NativeElseWasm, without having to figure out the type. - pub fn native_else_wasm() -> ExecutionManager> { - ExecutionManager::NativeElseWasm - } - - /// Evaluate to ExecutionManager::AlwaysWasm with trusted backend, without having to figure out - /// the type. - fn always_wasm() -> ExecutionManager> { - ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted) - } - - /// Evaluate ExecutionManager::AlwaysWasm with untrusted backend, without having to figure out - /// the type. - fn always_untrusted_wasm() -> ExecutionManager> { - ExecutionManager::AlwaysWasm(BackendTrustLevel::Untrusted) - } - /// The substrate state machine. 
pub struct StateMachine<'a, B, H, Exec> where @@ -286,9 +200,8 @@ mod execution { exec: &'a Exec, method: &'a str, call_data: &'a [u8], - overlay: &'a mut OverlayedChanges, - extensions: Extensions, - storage_transaction_cache: Option<&'a mut StorageTransactionCache>, + overlay: &'a mut OverlayedChanges, + extensions: &'a mut Extensions, runtime_code: &'a RuntimeCode<'a>, stats: StateMachineStats, /// The hash of the block the state machine will be executed on. @@ -318,11 +231,11 @@ mod execution { /// Creates new substrate state machine. pub fn new( backend: &'a B, - overlay: &'a mut OverlayedChanges, + overlay: &'a mut OverlayedChanges, exec: &'a Exec, method: &'a str, call_data: &'a [u8], - extensions: Extensions, + extensions: &'a mut Extensions, runtime_code: &'a RuntimeCode, context: CallContext, ) -> Self { @@ -333,7 +246,6 @@ mod execution { call_data, extensions, overlay, - storage_transaction_cache: None, runtime_code, stats: StateMachineStats::default(), parent_hash: None, @@ -341,19 +253,6 @@ mod execution { } } - /// Use given `cache` as storage transaction cache. - /// - /// The cache will be used to cache storage transactions that can be build while executing a - /// function in the runtime. For example, when calculating the storage root a transaction is - /// build that will be cached. - pub fn with_storage_transaction_cache( - mut self, - cache: Option<&'a mut StorageTransactionCache>, - ) -> Self { - self.storage_transaction_cache = cache; - self - } - /// Set the given `parent_hash` as the hash of the parent block. /// /// This will be used for improved logging. @@ -370,25 +269,12 @@ mod execution { /// blocks (e.g. a transaction at a time), ensure a different method is used. /// /// Returns the SCALE encoded result of the executed function. - pub fn execute(&mut self, strategy: ExecutionStrategy) -> Result, Box> { - // We are not giving a native call and thus we are sure that the result can never be a - // native value. 
- self.execute_using_consensus_failure_handler(strategy.get_manager()) - } - - fn execute_aux(&mut self, use_native: bool) -> (CallResult, bool) { - let mut cache = StorageTransactionCache::default(); - - let cache = match self.storage_transaction_cache.as_mut() { - Some(cache) => cache, - None => &mut cache, - }; - + pub fn execute(&mut self) -> Result, Box> { self.overlay .enter_runtime() .expect("StateMachine is never called from the runtime; qed"); - let mut ext = Ext::new(self.overlay, cache, self.backend, Some(&mut self.extensions)); + let mut ext = Ext::new(self.overlay, self.backend, Some(self.extensions)); let ext_id = ext.id; @@ -401,14 +287,10 @@ mod execution { "Call", ); - let (result, was_native) = self.exec.call( - &mut ext, - self.runtime_code, - self.method, - self.call_data, - use_native, - self.context, - ); + let result = self + .exec + .call(&mut ext, self.runtime_code, self.method, self.call_data, false, self.context) + .0; self.overlay .exit_runtime() @@ -417,99 +299,18 @@ mod execution { trace!( target: "state", ext_id = %HexDisplay::from(&ext_id.to_le_bytes()), - ?was_native, ?result, "Return", ); - (result, was_native) - } - - fn execute_call_with_both_strategy( - &mut self, - on_consensus_failure: Handler, - ) -> CallResult - where - Handler: - FnOnce(CallResult, CallResult) -> CallResult, - { - self.overlay.start_transaction(); - let (result, was_native) = self.execute_aux(true); - - if was_native { - self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); - let (wasm_result, _) = self.execute_aux(false); - - if (result.is_ok() && - wasm_result.is_ok() && result.as_ref().ok() == wasm_result.as_ref().ok()) || - result.is_err() && wasm_result.is_err() - { - result - } else { - on_consensus_failure(wasm_result, result) - } - } else { - self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION); - result - } - } - - fn execute_call_with_native_else_wasm_strategy(&mut self) -> CallResult { - 
self.overlay.start_transaction(); - let (result, was_native) = self.execute_aux(true); - - if !was_native || result.is_ok() { - self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION); - result - } else { - self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); - self.execute_aux(false).0 - } - } - - /// Execute a call using the given state backend, overlayed changes, and call executor. - /// - /// On an error, no prospective changes are written to the overlay. - /// - /// Note: changes to code will be in place if this call is made again. For running partial - /// blocks (e.g. a transaction at a time), ensure a different method is used. - /// - /// Returns the result of the executed function either in native representation `R` or - /// in SCALE encoded representation. - pub fn execute_using_consensus_failure_handler( - &mut self, - manager: ExecutionManager, - ) -> Result, Box> - where - Handler: - FnOnce(CallResult, CallResult) -> CallResult, - { - let result = { - match manager { - ExecutionManager::Both(on_consensus_failure) => - self.execute_call_with_both_strategy(on_consensus_failure), - ExecutionManager::NativeElseWasm => - self.execute_call_with_native_else_wasm_strategy(), - ExecutionManager::AlwaysWasm(trust_level) => { - let _abort_guard = match trust_level { - BackendTrustLevel::Trusted => None, - BackendTrustLevel::Untrusted => - Some(sp_panic_handler::AbortGuard::never_abort()), - }; - self.execute_aux(false).0 - }, - ExecutionManager::NativeWhenPossible => self.execute_aux(true).0, - } - }; - - result.map_err(|e| Box::new(e) as _) + result.map_err(|e| Box::new(e) as Box<_>) } } /// Prove execution using the given state backend, overlayed changes, and call executor. 
pub fn prove_execution( backend: &mut B, - overlay: &mut OverlayedChanges, + overlay: &mut OverlayedChanges, exec: &Exec, method: &str, call_data: &[u8], @@ -529,7 +330,7 @@ mod execution { method, call_data, runtime_code, - Default::default(), + &mut Default::default(), ) } @@ -544,12 +345,12 @@ mod execution { /// blocks (e.g. a transaction at a time), ensure a different method is used. pub fn prove_execution_on_trie_backend( trie_backend: &TrieBackend, - overlay: &mut OverlayedChanges, + overlay: &mut OverlayedChanges, exec: &Exec, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, - extensions: Extensions, + extensions: &mut Extensions, ) -> Result<(Vec, StorageProof), Box> where S: trie_backend_essence::TrieBackendStorage, @@ -570,7 +371,7 @@ mod execution { runtime_code, CallContext::Offchain, ) - .execute_using_consensus_failure_handler::<_>(always_wasm())?; + .execute()?; let proof = proving_backend .extract_proof() @@ -583,7 +384,7 @@ mod execution { pub fn execution_proof_check( root: H::Out, proof: StorageProof, - overlay: &mut OverlayedChanges, + overlay: &mut OverlayedChanges, exec: &Exec, method: &str, call_data: &[u8], @@ -608,7 +409,7 @@ mod execution { /// Check execution proof on proving backend, generated by `prove_execution` call. pub fn execution_proof_check_on_trie_backend( trie_backend: &TrieBackend, H>, - overlay: &mut OverlayedChanges, + overlay: &mut OverlayedChanges, exec: &Exec, method: &str, call_data: &[u8], @@ -625,11 +426,11 @@ mod execution { exec, method, call_data, - Extensions::default(), + &mut Extensions::default(), runtime_code, CallContext::Offchain, ) - .execute_using_consensus_failure_handler(always_untrusted_wasm()) + .execute() } /// Generate storage read proof. 
@@ -1287,7 +1088,7 @@ mod execution { #[cfg(test)] mod tests { use super::{backend::AsTrieBackend, ext::Ext, *}; - use crate::{execution::CallResult, in_memory_backend::new_in_mem_hash_key}; + use crate::{execution::CallResult, in_memory_backend::new_in_mem}; use assert_matches::assert_matches; use codec::Encode; use sp_core::{ @@ -1354,6 +1155,7 @@ mod tests { let backend = trie_backend::tests::test_trie(state_version, None, None); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); + let mut execution_extensions = &mut Default::default(); let mut state_machine = StateMachine::new( &backend, @@ -1365,12 +1167,12 @@ mod tests { }, "test", &[], - Default::default(), + &mut execution_extensions, &wasm_code, CallContext::Offchain, ); - assert_eq!(state_machine.execute(ExecutionStrategy::NativeWhenPossible).unwrap(), vec![66]); + assert_eq!(state_machine.execute().unwrap(), vec![66]); } #[test] @@ -1382,6 +1184,7 @@ mod tests { let backend = trie_backend::tests::test_trie(state_version, None, None); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); + let mut execution_extensions = &mut Default::default(); let mut state_machine = StateMachine::new( &backend, @@ -1393,47 +1196,12 @@ mod tests { }, "test", &[], - Default::default(), - &wasm_code, - CallContext::Offchain, - ); - - assert_eq!(state_machine.execute(ExecutionStrategy::NativeElseWasm).unwrap(), vec![66]); - } - - #[test] - fn dual_execution_strategy_detects_consensus_failure() { - dual_execution_strategy_detects_consensus_failure_inner(StateVersion::V0); - dual_execution_strategy_detects_consensus_failure_inner(StateVersion::V1); - } - fn dual_execution_strategy_detects_consensus_failure_inner(state_version: StateVersion) { - let mut consensus_failed = false; - let backend = trie_backend::tests::test_trie(state_version, None, None); - let mut overlayed_changes = Default::default(); - let wasm_code = RuntimeCode::empty(); - - let mut 
state_machine = StateMachine::new( - &backend, - &mut overlayed_changes, - &DummyCodeExecutor { - native_available: true, - native_succeeds: true, - fallback_succeeds: false, - }, - "test", - &[], - Default::default(), + &mut execution_extensions, &wasm_code, CallContext::Offchain, ); - assert!(state_machine - .execute_using_consensus_failure_handler(ExecutionManager::Both(|we, _ne| { - consensus_failed = true; - we - }),) - .is_err()); - assert!(consensus_failed); + assert_eq!(state_machine.execute().unwrap(), vec![66]); } #[test] @@ -1498,8 +1266,7 @@ mod tests { let overlay_limit = overlay.clone(); { - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); let _ = ext.clear_prefix(b"ab", None, None); } overlay.commit_transaction().unwrap(); @@ -1522,8 +1289,7 @@ mod tests { let mut overlay = overlay_limit; { - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); assert_matches!( ext.clear_prefix(b"ab", Some(1), None).deconstruct(), (Some(_), 1, 3, 1) @@ -1567,8 +1333,7 @@ mod tests { overlay.set_child_storage(&child_info, b"4".to_vec(), Some(b"1312".to_vec())); { - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); + let mut ext = Ext::new(&mut overlay, &backend, None); let r = ext.kill_child_storage(&child_info, Some(2), None); assert_matches!(r.deconstruct(), (Some(_), 2, 6, 2)); } @@ -1603,8 +1368,7 @@ mod tests { ]; let backend = InMemoryBackend::::from((initial, StateVersion::default())); let mut overlay = OverlayedChanges::default(); - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); + let mut ext = Ext::new(&mut overlay, &backend, None); let r = ext.kill_child_storage(&child_info, 
Some(0), None).deconstruct(); assert_matches!(r, (Some(_), 0, 0, 0)); let r = ext @@ -1633,8 +1397,7 @@ mod tests { ]; let backend = InMemoryBackend::::from((initial, StateVersion::default())); let mut overlay = OverlayedChanges::default(); - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); + let mut ext = Ext::new(&mut overlay, &backend, None); assert_eq!(ext.kill_child_storage(&child_info, None, None).deconstruct(), (None, 4, 4, 4)); } @@ -1642,11 +1405,10 @@ mod tests { fn set_child_storage_works() { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; - let state = new_in_mem_hash_key::(); + let state = new_in_mem::(); let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); ext.set_child_storage(child_info, b"abc".to_vec(), b"def".to_vec()); assert_eq!(ext.child_storage(child_info, b"abc"), Some(b"def".to_vec())); @@ -1658,19 +1420,18 @@ mod tests { fn append_storage_works() { let reference_data = vec![b"data1".to_vec(), b"2".to_vec(), b"D3".to_vec(), b"d4".to_vec()]; let key = b"key".to_vec(); - let state = new_in_mem_hash_key::(); + let state = new_in_mem::(); let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); - let mut cache = StorageTransactionCache::default(); { - let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); ext.storage_append(key.clone(), reference_data[0].encode()); assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode())); } overlay.start_transaction(); { - let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); for i in reference_data.iter().skip(1) { 
ext.storage_append(key.clone(), i.encode()); @@ -1679,7 +1440,7 @@ mod tests { } overlay.rollback_transaction().unwrap(); { - let ext = Ext::new(&mut overlay, &mut cache, backend, None); + let ext = Ext::new(&mut overlay, backend, None); assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode())); } } @@ -1694,14 +1455,13 @@ mod tests { } let key = b"events".to_vec(); - let mut cache = StorageTransactionCache::default(); - let state = new_in_mem_hash_key::(); + let state = new_in_mem::(); let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); // For example, block initialization with event. { - let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); ext.clear_storage(key.as_slice()); ext.storage_append(key.clone(), Item::InitializationItem.encode()); } @@ -1709,7 +1469,7 @@ mod tests { // For example, first transaction resulted in panic during block building { - let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode())); @@ -1724,7 +1484,7 @@ mod tests { // Then we apply next transaction which is valid this time. { - let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode())); @@ -1739,7 +1499,7 @@ mod tests { // Then only initlaization item and second (committed) item should persist. 
{ - let ext = Ext::new(&mut overlay, &mut cache, backend, None); + let ext = Ext::new(&mut overlay, backend, None); assert_eq!( ext.storage(key.as_slice()), Some(vec![Item::InitializationItem, Item::CommitedItem].encode()), @@ -2156,12 +1916,11 @@ mod tests { let mut transaction = { let backend = test_trie(state_version, None, None); - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); + let mut ext = Ext::new(&mut overlay, &backend, None); ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec()); ext.storage_root(state_version); - cache.transaction.unwrap() + overlay.drain_storage_changes(&backend, state_version).unwrap().transaction }; let mut duplicate = false; for (k, (value, rc)) in transaction.drain().iter() { @@ -2193,8 +1952,7 @@ mod tests { assert_eq!(overlay.storage(b"bbb"), None); { - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); assert_eq!(ext.storage(b"bbb"), Some(vec![])); assert_eq!(ext.storage(b"ccc"), Some(vec![])); ext.clear_storage(b"ccc"); diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index b32df635b177c..28cfecf1dbd62 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -21,7 +21,7 @@ mod changeset; mod offchain; use self::changeset::OverlayedChangeSet; -use crate::{backend::Backend, stats::StateMachineStats, DefaultError}; +use crate::{backend::Backend, stats::StateMachineStats, BackendTransaction, DefaultError}; use codec::{Decode, Encode}; use hash_db::Hasher; pub use offchain::OffchainOverlayedChanges; @@ -34,6 +34,7 @@ use sp_externalities::{Extension, Extensions}; #[cfg(not(feature = "std"))] use 
sp_std::collections::btree_map::BTreeMap as Map; use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; +use sp_trie::{empty_child_trie_root, LayoutV1}; #[cfg(feature = "std")] use std::collections::{hash_map::Entry as MapEntry, HashMap as Map}; #[cfg(feature = "std")] @@ -88,8 +89,7 @@ impl Extrinsics { /// The set of changes that are overlaid onto the backend. /// /// It allows changes to be modified using nestable transactions. -#[derive(Debug, Default, Clone)] -pub struct OverlayedChanges { +pub struct OverlayedChanges { /// Top level storage changes. top: OverlayedChangeSet, /// Child storage changes. The map key is the child storage key without the common prefix. @@ -102,6 +102,52 @@ pub struct OverlayedChanges { collect_extrinsics: bool, /// Collect statistic on this execution. stats: StateMachineStats, + /// Caches the "storage transaction" that is created while calling `storage_root`. + /// + /// This transaction can be applied to the backend to persist the state changes. + storage_transaction_cache: Option>, +} + +impl Default for OverlayedChanges { + fn default() -> Self { + Self { + top: Default::default(), + children: Default::default(), + offchain: Default::default(), + transaction_index_ops: Default::default(), + collect_extrinsics: Default::default(), + stats: Default::default(), + storage_transaction_cache: None, + } + } +} + +impl Clone for OverlayedChanges { + fn clone(&self) -> Self { + Self { + top: self.top.clone(), + children: self.children.clone(), + offchain: self.offchain.clone(), + transaction_index_ops: self.transaction_index_ops.clone(), + collect_extrinsics: self.collect_extrinsics, + stats: self.stats.clone(), + storage_transaction_cache: self.storage_transaction_cache.clone(), + } + } +} + +impl sp_std::fmt::Debug for OverlayedChanges { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("OverlayedChanges") + .field("top", &self.top) + .field("children", &self.children) + .field("offchain", 
&self.offchain) + .field("transaction_index_ops", &self.transaction_index_ops) + .field("collect_extrinsics", &self.collect_extrinsics) + .field("stats", &self.stats) + .field("storage_transaction_cache", &self.storage_transaction_cache) + .finish() + } } /// Transaction index operation. @@ -129,7 +175,7 @@ pub enum IndexOperation { /// /// This contains all the changes to the storage and transactions to apply theses changes to the /// backend. -pub struct StorageChanges { +pub struct StorageChanges { /// All changes to the main storage. /// /// A value of `None` means that it was deleted. @@ -142,7 +188,7 @@ pub struct StorageChanges { /// [`main_storage_changes`](StorageChanges::main_storage_changes) and from /// [`child_storage_changes`](StorageChanges::child_storage_changes). /// [`offchain_storage_changes`](StorageChanges::offchain_storage_changes). - pub transaction: Transaction, + pub transaction: BackendTransaction, /// The storage root after applying the transaction. pub transaction_storage_root: H::Out, /// Changes to the transaction index, @@ -151,7 +197,7 @@ pub struct StorageChanges { } #[cfg(feature = "std")] -impl StorageChanges { +impl StorageChanges { /// Deconstruct into the inner values pub fn into_inner( self, @@ -159,7 +205,7 @@ impl StorageChanges { StorageCollection, ChildStorageCollection, OffchainChangesCollection, - Transaction, + BackendTransaction, H::Out, Vec, ) { @@ -174,44 +220,60 @@ impl StorageChanges { } } +impl Default for StorageChanges { + fn default() -> Self { + Self { + main_storage_changes: Default::default(), + child_storage_changes: Default::default(), + offchain_storage_changes: Default::default(), + transaction: Default::default(), + transaction_storage_root: Default::default(), + #[cfg(feature = "std")] + transaction_index_changes: Default::default(), + } + } +} + /// Storage transactions are calculated as part of the `storage_root`. /// These transactions can be reused for importing the block into the /// storage. 
So, we cache them to not require a recomputation of those transactions. -pub struct StorageTransactionCache { +struct StorageTransactionCache { /// Contains the changes for the main and the child storages as one transaction. - pub(crate) transaction: Option, + transaction: BackendTransaction, /// The storage root after applying the transaction. - pub(crate) transaction_storage_root: Option, + transaction_storage_root: H::Out, } -impl StorageTransactionCache { - /// Reset the cached transactions. - pub fn reset(&mut self) { - *self = Self::default(); +impl StorageTransactionCache { + fn into_inner(self) -> (BackendTransaction, H::Out) { + (self.transaction, self.transaction_storage_root) } } -impl Default for StorageTransactionCache { - fn default() -> Self { - Self { transaction: None, transaction_storage_root: None } +impl Clone for StorageTransactionCache { + fn clone(&self) -> Self { + Self { + transaction: self.transaction.clone(), + transaction_storage_root: self.transaction_storage_root, + } } } -impl Default for StorageChanges { - fn default() -> Self { - Self { - main_storage_changes: Default::default(), - child_storage_changes: Default::default(), - offchain_storage_changes: Default::default(), - transaction: Default::default(), - transaction_storage_root: Default::default(), - #[cfg(feature = "std")] - transaction_index_changes: Default::default(), - } +impl sp_std::fmt::Debug for StorageTransactionCache { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut debug = f.debug_struct("StorageTransactionCache"); + + #[cfg(feature = "std")] + debug.field("transaction_storage_root", &self.transaction_storage_root); + + #[cfg(not(feature = "std"))] + debug.field("transaction_storage_root", &self.transaction_storage_root.as_ref()); + + debug.finish() } } -impl OverlayedChanges { +impl OverlayedChanges { /// Whether no changes are contained in the top nor in any of the child changes. 
pub fn is_empty(&self) -> bool { self.top.is_empty() && self.children.is_empty() @@ -234,6 +296,12 @@ impl OverlayedChanges { }) } + /// Should be called when there are changes that require to reset the + /// `storage_transaction_cache`. + fn mark_dirty(&mut self) { + self.storage_transaction_cache = None; + } + /// Returns mutable reference to current value. /// If there is no value in the overlay, the given callback is used to initiate the value. /// Warning this function registers a change, so the mutable reference MUST be modified. @@ -245,6 +313,8 @@ impl OverlayedChanges { key: &[u8], init: impl Fn() -> StorageValue, ) -> &mut StorageValue { + self.mark_dirty(); + let value = self.top.modify(key.to_vec(), init, self.extrinsic_index()); // if the value was deleted initialise it back with an empty vec @@ -266,6 +336,8 @@ impl OverlayedChanges { /// /// Can be rolled back or committed when called inside a transaction. pub fn set_storage(&mut self, key: StorageKey, val: Option) { + self.mark_dirty(); + let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_write_overlay(size_write); self.top.set(key, val, self.extrinsic_index()); @@ -282,6 +354,8 @@ impl OverlayedChanges { key: StorageKey, val: Option, ) { + self.mark_dirty(); + let extrinsic_index = self.extrinsic_index(); let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_write_overlay(size_write); @@ -300,6 +374,8 @@ impl OverlayedChanges { /// /// Can be rolled back or committed when called inside a transaction. pub(crate) fn clear_child_storage(&mut self, child_info: &ChildInfo) -> u32 { + self.mark_dirty(); + let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key().to_vec(); let top = &self.top; @@ -316,6 +392,8 @@ impl OverlayedChanges { /// /// Can be rolled back or committed when called inside a transaction. 
pub(crate) fn clear_prefix(&mut self, prefix: &[u8]) -> u32 { + self.mark_dirty(); + self.top.clear_where(|key, _| key.starts_with(prefix), self.extrinsic_index()) } @@ -323,6 +401,8 @@ impl OverlayedChanges { /// /// Can be rolled back or committed when called inside a transaction pub(crate) fn clear_child_prefix(&mut self, child_info: &ChildInfo, prefix: &[u8]) -> u32 { + self.mark_dirty(); + let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key().to_vec(); let top = &self.top; @@ -364,6 +444,8 @@ impl OverlayedChanges { /// Any changes made during that transaction are discarded. Returns an error if /// there is no open transaction that can be rolled back. pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { + self.mark_dirty(); + self.top.rollback_transaction()?; retain_map(&mut self.children, |_, (changeset, _)| { changeset @@ -432,32 +514,6 @@ impl OverlayedChanges { Ok(()) } - /// Consume all changes (top + children) and return them. - /// - /// After calling this function no more changes are contained in this changeset. - /// - /// Panics: - /// Panics if `transaction_depth() > 0` - fn drain_committed( - &mut self, - ) -> ( - impl Iterator)>, - impl Iterator< - Item = ( - StorageKey, - (impl Iterator)>, ChildInfo), - ), - >, - ) { - use sp_std::mem::take; - ( - take(&mut self.top).drain_commited(), - take(&mut self.children) - .into_iter() - .map(|(key, (val, info))| (key, (val.drain_commited(), info))), - ) - } - /// Consume all changes (top + children) and return them. /// /// After calling this function no more changes are contained in this changeset. @@ -495,42 +551,33 @@ impl OverlayedChanges { &self.transaction_index_ops } - /// Convert this instance with all changes into a [`StorageChanges`] instance. 
- #[cfg(feature = "std")] - pub fn into_storage_changes, H: Hasher>( - mut self, - backend: &B, - mut cache: StorageTransactionCache, - state_version: StateVersion, - ) -> Result, DefaultError> - where - H::Out: Ord + Encode + 'static, - { - self.drain_storage_changes(backend, &mut cache, state_version) - } - /// Drain all changes into a [`StorageChanges`] instance. Leave empty overlay in place. - pub fn drain_storage_changes, H: Hasher>( + pub fn drain_storage_changes>( &mut self, backend: &B, - cache: &mut StorageTransactionCache, state_version: StateVersion, - ) -> Result, DefaultError> + ) -> Result, DefaultError> where H::Out: Ord + Encode + 'static, { - // If the transaction does not exist, we generate it. - if cache.transaction.is_none() { - self.storage_root(backend, cache, state_version); - } + let (transaction, transaction_storage_root) = match self.storage_transaction_cache.take() { + Some(cache) => cache.into_inner(), + // If the transaction does not exist, we generate it. + None => { + self.storage_root(backend, state_version); + self.storage_transaction_cache + .take() + .expect("`storage_transaction_cache` was just initialized; qed") + .into_inner() + }, + }; - let (transaction, transaction_storage_root) = cache - .transaction - .take() - .and_then(|t| cache.transaction_storage_root.take().map(|tr| (t, tr))) - .expect("Transaction was be generated as part of `storage_root`; qed"); + use sp_std::mem::take; + let main_storage_changes = take(&mut self.top).drain_commited(); + let child_storage_changes = take(&mut self.children) + .into_iter() + .map(|(key, (val, info))| (key, (val.drain_commited(), info))); - let (main_storage_changes, child_storage_changes) = self.drain_committed(); let offchain_storage_changes = self.offchain_drain_committed().collect(); #[cfg(feature = "std")] @@ -562,29 +609,29 @@ impl OverlayedChanges { /// Changes that are made outside of extrinsics, are marked with /// `NO_EXTRINSIC_INDEX` index. 
fn extrinsic_index(&self) -> Option { - match self.collect_extrinsics { - true => Some( - self.storage(EXTRINSIC_INDEX) - .and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx).ok())) - .unwrap_or(NO_EXTRINSIC_INDEX), - ), - false => None, - } + self.collect_extrinsics.then(|| { + self.storage(EXTRINSIC_INDEX) + .and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx).ok())) + .unwrap_or(NO_EXTRINSIC_INDEX) + }) } /// Generate the storage root using `backend` and all changes /// as seen by the current transaction. /// - /// Returns the storage root and caches storage transaction in the given `cache`. - pub fn storage_root>( - &self, + /// Returns the storage root and whether it was already cached. + pub fn storage_root>( + &mut self, backend: &B, - cache: &mut StorageTransactionCache, state_version: StateVersion, - ) -> H::Out + ) -> (H::Out, bool) where H::Out: Ord + Encode, { + if let Some(cache) = &self.storage_transaction_cache { + return (cache.transaction_storage_root, true) + } + let delta = self.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); let child_delta = self.children().map(|(changes, info)| { (info, changes.map(|(k, v)| (&k[..], v.value().map(|v| &v[..])))) @@ -592,10 +639,72 @@ impl OverlayedChanges { let (root, transaction) = backend.full_storage_root(delta, child_delta, state_version); - cache.transaction = Some(transaction); - cache.transaction_storage_root = Some(root); + self.storage_transaction_cache = + Some(StorageTransactionCache { transaction, transaction_storage_root: root }); + + (root, false) + } + + /// Generate the child storage root using `backend` and all child changes + /// as seen by the current transaction. + /// + /// Returns the child storage root and whether it was already cached. 
+ pub fn child_storage_root>( + &mut self, + child_info: &ChildInfo, + backend: &B, + state_version: StateVersion, + ) -> Result<(H::Out, bool), B::Error> + where + H::Out: Ord + Encode + Decode, + { + let storage_key = child_info.storage_key(); + let prefixed_storage_key = child_info.prefixed_storage_key(); + + if self.storage_transaction_cache.is_some() { + let root = self + .storage(prefixed_storage_key.as_slice()) + .map(|v| Ok(v.map(|v| v.to_vec()))) + .or_else(|| backend.storage(prefixed_storage_key.as_slice()).map(Some).transpose()) + .transpose()? + .flatten() + .and_then(|k| Decode::decode(&mut &k[..]).ok()) + // V1 is equivalent to V0 on empty root. + .unwrap_or_else(empty_child_trie_root::>); + + return Ok((root, true)) + } - root + let root = if let Some((changes, info)) = self.child_changes(storage_key) { + let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); + Some(backend.child_storage_root(info, delta, state_version)) + } else { + None + }; + + let root = if let Some((root, is_empty, _)) = root { + // We store update in the overlay in order to be able to use + // 'self.storage_transaction' cache. This is brittle as it rely on Ext only querying + // the trie backend for storage root. + // A better design would be to manage 'child_storage_transaction' in a + // similar way as 'storage_transaction' but for each child trie. + self.set_storage(prefixed_storage_key.into_inner(), (!is_empty).then(|| root.encode())); + + self.mark_dirty(); + + root + } else { + // empty overlay + let root = backend + .storage(prefixed_storage_key.as_slice())? + .and_then(|k| Decode::decode(&mut &k[..]).ok()) + // V1 is equivalent to V0 on empty root. 
+ .unwrap_or_else(empty_child_trie_root::>); + + root + }; + + Ok((root, false)) } /// Returns an iterator over the keys (in lexicographic order) following `key` (excluding `key`) @@ -639,7 +748,7 @@ impl OverlayedChanges { } #[cfg(feature = "std")] -impl From for OverlayedChanges { +impl From for OverlayedChanges { fn from(storage: sp_core::storage::Storage) -> Self { Self { top: storage.top.into(), @@ -742,7 +851,8 @@ impl<'a> OverlayedExtensions<'a> { #[cfg(test)] mod tests { use super::*; - use crate::{ext::Ext, InMemoryBackend}; + use crate::{ext::Ext, new_in_mem, InMemoryBackend}; + use array_bytes::bytes2hex; use sp_core::{traits::Externalities, Blake2Hasher}; use std::collections::BTreeMap; @@ -755,7 +865,7 @@ mod tests { #[test] fn overlayed_storage_works() { - let mut overlayed = OverlayedChanges::default(); + let mut overlayed = OverlayedChanges::::default(); let key = vec![42, 69, 169, 142]; @@ -790,7 +900,7 @@ mod tests { fn offchain_overlayed_storage_transactions_works() { use sp_core::offchain::STORAGE_PREFIX; fn check_offchain_content( - state: &OverlayedChanges, + state: &OverlayedChanges, nb_commit: usize, expected: Vec<(Vec, Option>)>, ) { @@ -867,18 +977,61 @@ mod tests { overlay.set_storage(b"dogglesworth".to_vec(), Some(b"cat".to_vec())); overlay.set_storage(b"doug".to_vec(), None); - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); - let root = array_bytes::hex2bytes_unchecked( - "39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa", - ); + { + let mut ext = Ext::new(&mut overlay, &backend, None); + let root = "39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"; + + assert_eq!(bytes2hex("", &ext.storage_root(state_version)), root); + // Calling a second time should use it from the cache + assert_eq!(bytes2hex("", &ext.storage_root(state_version)), root); + } + + // Check that the storage root is recalculated + 
overlay.set_storage(b"doug2".to_vec(), Some(b"yes".to_vec())); - assert_eq!(&ext.storage_root(state_version)[..], &root); + let mut ext = Ext::new(&mut overlay, &backend, None); + let root = "5c0a4e35cb967de785e1cb8743e6f24b6ff6d45155317f2078f6eb3fc4ff3e3d"; + assert_eq!(bytes2hex("", &ext.storage_root(state_version)), root); + } + + #[test] + fn overlayed_child_storage_root_works() { + let state_version = StateVersion::default(); + let child_info = ChildInfo::new_default(b"Child1"); + let child_info = &child_info; + let backend = new_in_mem::(); + let mut overlay = OverlayedChanges::::default(); + overlay.start_transaction(); + overlay.set_child_storage(child_info, vec![20], Some(vec![20])); + overlay.set_child_storage(child_info, vec![30], Some(vec![30])); + overlay.set_child_storage(child_info, vec![40], Some(vec![40])); + overlay.commit_transaction().unwrap(); + overlay.set_child_storage(child_info, vec![10], Some(vec![10])); + overlay.set_child_storage(child_info, vec![30], None); + + { + let mut ext = Ext::new(&mut overlay, &backend, None); + let child_root = "c02965e1df4dc5baf6977390ce67dab1d7a9b27a87c1afe27b50d29cc990e0f5"; + let root = "eafb765909c3ed5afd92a0c564acf4620d0234b31702e8e8e9b48da72a748838"; + + assert_eq!( + bytes2hex("", &ext.child_storage_root(child_info, state_version)), + child_root, + ); + + assert_eq!(bytes2hex("", &ext.storage_root(state_version)), root); + + // Calling a second time should use it from the cache + assert_eq!( + bytes2hex("", &ext.child_storage_root(child_info, state_version)), + child_root, + ); + } } #[test] fn extrinsic_changes_are_collected() { - let mut overlay = OverlayedChanges::default(); + let mut overlay = OverlayedChanges::::default(); overlay.set_collect_extrinsics(true); overlay.start_transaction(); @@ -919,7 +1072,7 @@ mod tests { #[test] fn next_storage_key_change_works() { - let mut overlay = OverlayedChanges::default(); + let mut overlay = OverlayedChanges::::default(); overlay.start_transaction(); 
overlay.set_storage(vec![20], Some(vec![20])); overlay.set_storage(vec![30], Some(vec![30])); @@ -960,7 +1113,7 @@ mod tests { let child_info = ChildInfo::new_default(b"Child1"); let child_info = &child_info; let child = child_info.storage_key(); - let mut overlay = OverlayedChanges::default(); + let mut overlay = OverlayedChanges::::default(); overlay.start_transaction(); overlay.set_child_storage(child_info, vec![20], Some(vec![20])); overlay.set_child_storage(child_info, vec![30], Some(vec![30])); diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 78fec43cd7ec7..0eb7b6d1118f9 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -23,8 +23,8 @@ use std::{ }; use crate::{ - backend::Backend, ext::Ext, InMemoryBackend, OverlayedChanges, StorageKey, - StorageTransactionCache, StorageValue, TrieBackendBuilder, + backend::Backend, ext::Ext, InMemoryBackend, OverlayedChanges, StorageKey, StorageValue, + TrieBackendBuilder, }; use hash_db::{HashDB, Hasher}; @@ -36,7 +36,7 @@ use sp_core::{ }, }; use sp_externalities::{Extension, ExtensionStore, Extensions}; -use sp_trie::StorageProof; +use sp_trie::{PrefixedMemoryDB, StorageProof}; /// Simple HashMap-based Externalities impl. pub struct TestExternalities @@ -45,10 +45,8 @@ where H::Out: codec::Codec + Ord, { /// The overlay changed storage. - overlay: OverlayedChanges, + overlay: OverlayedChanges, offchain_db: TestPersistentOffchainDB, - storage_transaction_cache: - StorageTransactionCache< as Backend>::Transaction, H>, /// Storage backend. pub backend: InMemoryBackend, /// Extensions. @@ -64,12 +62,7 @@ where { /// Get externalities implementation. 
pub fn ext(&mut self) -> Ext> { - Ext::new( - &mut self.overlay, - &mut self.storage_transaction_cache, - &self.backend, - Some(&mut self.extensions), - ) + Ext::new(&mut self.overlay, &self.backend, Some(&mut self.extensions)) } /// Create a new instance of `TestExternalities` with storage. @@ -112,13 +105,12 @@ where offchain_db, extensions: Default::default(), backend, - storage_transaction_cache: Default::default(), state_version, } } /// Returns the overlayed changes. - pub fn overlayed_changes(&self) -> &OverlayedChanges { + pub fn overlayed_changes(&self) -> &OverlayedChanges { &self.overlay } @@ -164,26 +156,51 @@ where /// /// This can be used as a fast way to restore the storage state from a backup because the trie /// does not need to be computed. - pub fn from_raw_snapshot(&mut self, raw_storage: Vec<(H::Out, Vec)>, storage_root: H::Out) { - for (k, v) in raw_storage { - self.backend.backend_storage_mut().emplace(k, hash_db::EMPTY_PREFIX, v); + pub fn from_raw_snapshot( + raw_storage: Vec<(Vec, (Vec, i32))>, + storage_root: H::Out, + state_version: StateVersion, + ) -> Self { + let mut backend = PrefixedMemoryDB::default(); + + for (key, (v, ref_count)) in raw_storage { + let mut hash = H::Out::default(); + let hash_len = hash.as_ref().len(); + + if key.len() < hash_len { + log::warn!("Invalid key in `from_raw_snapshot`: {key:?}"); + continue + } + + hash.as_mut().copy_from_slice(&key[(key.len() - hash_len)..]); + + // Each time .emplace is called the internal MemoryDb ref count increments. + // Repeatedly call emplace to initialise the ref count to the correct value. 
+ for _ in 0..ref_count { + backend.emplace(hash, (&key[..(key.len() - hash_len)], None), v.clone()); + } + } + + Self { + backend: TrieBackendBuilder::new(backend, storage_root).build(), + overlay: Default::default(), + offchain_db: Default::default(), + extensions: Default::default(), + state_version, } - self.backend.set_root(storage_root); } /// Drains the underlying raw storage key/values and returns the root hash. /// /// Useful for backing up the storage in a format that can be quickly re-loaded. - /// - /// Note: This DB will be inoperable after this call. - pub fn into_raw_snapshot(mut self) -> (Vec<(H::Out, Vec)>, H::Out) { + pub fn into_raw_snapshot(mut self) -> (Vec<(Vec, (Vec, i32))>, H::Out) { let raw_key_values = self .backend .backend_storage_mut() .drain() .into_iter() - .map(|(k, v)| (k, v.0)) - .collect::)>>(); + .filter(|(_, (_, r))| *r > 0) + .collect::, (Vec, i32))>>(); (raw_key_values, *self.backend.root()) } @@ -213,11 +230,7 @@ where /// /// This will panic if there are still open transactions. 
pub fn commit_all(&mut self) -> Result<(), String> { - let changes = self.overlay.drain_storage_changes::<_, _>( - &self.backend, - &mut Default::default(), - self.state_version, - )?; + let changes = self.overlay.drain_storage_changes(&self.backend, self.state_version)?; self.backend .apply_transaction(changes.transaction_storage_root, changes.transaction); @@ -241,12 +254,8 @@ where let proving_backend = TrieBackendBuilder::wrap(&self.backend) .with_recorder(Default::default()) .build(); - let mut proving_ext = Ext::new( - &mut self.overlay, - &mut self.storage_transaction_cache, - &proving_backend, - Some(&mut self.extensions), - ); + let mut proving_ext = + Ext::new(&mut self.overlay, &proving_backend, Some(&mut self.extensions)); let outcome = sp_externalities::set_and_run_with_externalities(&mut proving_ext, execute); let proof = proving_backend.extract_proof().expect("Failed to extract storage proof"); @@ -384,7 +393,7 @@ mod tests { ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - let root = array_bytes::hex_n_into_unchecked::( + let root = array_bytes::hex_n_into_unchecked::<_, H256, 32>( "ed4d8c799d996add422395a6abd7545491d40bd838d738afafa1b8a4de625489", ); assert_eq!(H256::from_slice(ext.storage_root(Default::default()).as_slice()), root); @@ -402,14 +411,25 @@ mod tests { original_ext.insert_child(child_info.clone(), b"cattytown".to_vec(), b"is_dark".to_vec()); original_ext.insert_child(child_info.clone(), b"doggytown".to_vec(), b"is_sunny".to_vec()); + // Apply the backend to itself again to increase the ref count of all nodes. + original_ext.backend.apply_transaction( + *original_ext.backend.root(), + original_ext.backend.clone().into_storage(), + ); + + // Ensure all have the correct ref counrt + assert!(original_ext.backend.backend_storage().keys().values().all(|r| *r == 2)); + // Drain the raw storage and root. 
let root = *original_ext.backend.root(); let (raw_storage, storage_root) = original_ext.into_raw_snapshot(); // Load the raw storage and root into a new TestExternalities. - let mut recovered_ext = - TestExternalities::::from((Default::default(), Default::default())); - recovered_ext.from_raw_snapshot(raw_storage, storage_root); + let recovered_ext = TestExternalities::::from_raw_snapshot( + raw_storage, + storage_root, + Default::default(), + ); // Check the storage root is the same as the original assert_eq!(root, *recovered_ext.backend.root()); @@ -428,6 +448,9 @@ mod tests { recovered_ext.backend.child_storage(&child_info, b"doggytown").unwrap(), Some(b"is_sunny".to_vec()) ); + + // Ensure all have the correct ref count after importing + assert!(recovered_ext.backend.backend_storage().keys().values().all(|r| *r == 2)); } #[test] diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index abd58b383969a..cc7132181f90a 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -24,61 +24,143 @@ use crate::{ trie_backend_essence::{RawIter, TrieBackendEssence, TrieBackendStorage}, Backend, StorageKey, StorageValue, }; + use codec::Codec; #[cfg(feature = "std")] use hash_db::HashDB; use hash_db::Hasher; use sp_core::storage::{ChildInfo, StateVersion}; +use sp_trie::PrefixedMemoryDB; #[cfg(feature = "std")] -use sp_trie::{cache::LocalTrieCache, recorder::Recorder}; -#[cfg(feature = "std")] -use sp_trie::{MemoryDB, StorageProof}; - -/// Dummy type to be used in `no_std`. -/// -/// This is required to have the type available for [`TrieBackendBuilder`] and [`TrieBackend`]. 
+use sp_trie::{ + cache::{LocalTrieCache, TrieCache}, + recorder::Recorder, + MemoryDB, StorageProof, +}; #[cfg(not(feature = "std"))] -pub struct LocalTrieCache(sp_std::marker::PhantomData); +use sp_trie::{Error, NodeCodec}; +use trie_db::TrieCache as TrieCacheT; +#[cfg(not(feature = "std"))] +use trie_db::{node::NodeOwned, CachedValue}; -/// Special trait to support taking the [`LocalTrieCache`] by value or by reference. -/// -/// This trait is internal use only and to emphasize this, the trait is sealed. -pub trait AsLocalTrieCache: sealed::Sealed { - /// Returns `self` as [`LocalTrieCache`]. - #[cfg(feature = "std")] - fn as_local_trie_cache(&self) -> &LocalTrieCache; +/// A provider of trie caches that are compatible with [`trie_db::TrieDB`]. +pub trait TrieCacheProvider { + /// Cache type that implements [`trie_db::TrieCache`]. + type Cache<'a>: TrieCacheT> + 'a + where + Self: 'a; + + /// Return a [`trie_db::TrieDB`] compatible cache. + /// + /// The `storage_root` parameter should be the storage root of the used trie. + fn as_trie_db_cache(&self, storage_root: H::Out) -> Self::Cache<'_>; + + /// Returns a cache that can be used with a [`trie_db::TrieDBMut`]. + /// + /// When finished with the operation on the trie, it is required to call [`Self::merge`] to + /// merge the cached items for the correct `storage_root`. + fn as_trie_db_mut_cache(&self) -> Self::Cache<'_>; + + /// Merge the cached data in `other` into the provider using the given `new_root`. + /// + /// This must be used for the cache returned by [`Self::as_trie_db_mut_cache`] as otherwise the + /// cached data is just thrown away. 
+ fn merge<'a>(&'a self, other: Self::Cache<'a>, new_root: H::Out); } -impl AsLocalTrieCache for LocalTrieCache { - #[cfg(feature = "std")] - #[inline] - fn as_local_trie_cache(&self) -> &LocalTrieCache { - self +#[cfg(feature = "std")] +impl TrieCacheProvider for LocalTrieCache { + type Cache<'a> = TrieCache<'a, H> where H: 'a; + + fn as_trie_db_cache(&self, storage_root: H::Out) -> Self::Cache<'_> { + self.as_trie_db_cache(storage_root) + } + + fn as_trie_db_mut_cache(&self) -> Self::Cache<'_> { + self.as_trie_db_mut_cache() + } + + fn merge<'a>(&'a self, other: Self::Cache<'a>, new_root: H::Out) { + other.merge_into(self, new_root) } } #[cfg(feature = "std")] -impl AsLocalTrieCache for &LocalTrieCache { - #[inline] - fn as_local_trie_cache(&self) -> &LocalTrieCache { - self +impl TrieCacheProvider for &LocalTrieCache { + type Cache<'a> = TrieCache<'a, H> where Self: 'a; + + fn as_trie_db_cache(&self, storage_root: H::Out) -> Self::Cache<'_> { + (*self).as_trie_db_cache(storage_root) + } + + fn as_trie_db_mut_cache(&self) -> Self::Cache<'_> { + (*self).as_trie_db_mut_cache() + } + + fn merge<'a>(&'a self, other: Self::Cache<'a>, new_root: H::Out) { + other.merge_into(self, new_root) } } -/// Special module that contains the `Sealed` trait. -mod sealed { - use super::*; +/// Cache provider that allows construction of a [`TrieBackend`] and satisfies the requirements, but +/// can never be instantiated. +#[cfg(not(feature = "std"))] +pub struct UnimplementedCacheProvider { + // Not strictly necessary, but the H bound allows to use this as a drop-in + // replacement for the `LocalTrieCache` in no-std contexts. + _phantom: core::marker::PhantomData, + // Statically prevents construction. 
+ _infallible: core::convert::Infallible, +} + +#[cfg(not(feature = "std"))] +impl trie_db::TrieCache> for UnimplementedCacheProvider { + fn lookup_value_for_key(&mut self, _key: &[u8]) -> Option<&CachedValue> { + unimplemented!() + } - /// A special trait which prevents externals to implement the [`AsLocalTrieCache`] outside - /// of this crate. - pub trait Sealed {} + fn cache_value_for_key(&mut self, _key: &[u8], _value: CachedValue) { + unimplemented!() + } - impl Sealed for LocalTrieCache {} - impl Sealed for &LocalTrieCache {} + fn get_or_insert_node( + &mut self, + _hash: H::Out, + _fetch_node: &mut dyn FnMut() -> trie_db::Result, H::Out, Error>, + ) -> trie_db::Result<&NodeOwned, H::Out, Error> { + unimplemented!() + } + + fn get_node(&mut self, _hash: &H::Out) -> Option<&NodeOwned> { + unimplemented!() + } } +#[cfg(not(feature = "std"))] +impl TrieCacheProvider for UnimplementedCacheProvider { + type Cache<'a> = UnimplementedCacheProvider where H: 'a; + + fn as_trie_db_cache(&self, _storage_root: ::Out) -> Self::Cache<'_> { + unimplemented!() + } + + fn as_trie_db_mut_cache(&self) -> Self::Cache<'_> { + unimplemented!() + } + + fn merge<'a>(&'a self, _other: Self::Cache<'a>, _new_root: ::Out) { + unimplemented!() + } +} + +#[cfg(feature = "std")] +type DefaultCache = LocalTrieCache; + +#[cfg(not(feature = "std"))] +type DefaultCache = UnimplementedCacheProvider; + /// Builder for creating a [`TrieBackend`]. -pub struct TrieBackendBuilder, H: Hasher, C = LocalTrieCache> { +pub struct TrieBackendBuilder, H: Hasher, C = DefaultCache> { storage: S, root: H::Out, #[cfg(feature = "std")] @@ -86,7 +168,7 @@ pub struct TrieBackendBuilder, H: Hasher, C = LocalTrie cache: Option, } -impl TrieBackendBuilder> +impl TrieBackendBuilder> where S: TrieBackendStorage, H: Hasher, @@ -108,6 +190,16 @@ where S: TrieBackendStorage, H: Hasher, { + /// Create a new builder instance. 
+ pub fn new_with_cache(storage: S, root: H::Out, cache: C) -> Self { + Self { + storage, + root, + #[cfg(feature = "std")] + recorder: None, + cache: Some(cache), + } + } /// Wrap the given [`TrieBackend`]. /// /// This can be used for example if all accesses to the trie should @@ -121,10 +213,7 @@ where root: *other.essence.root(), #[cfg(feature = "std")] recorder: None, - #[cfg(feature = "std")] cache: other.essence.trie_node_cache.as_ref(), - #[cfg(not(feature = "std"))] - cache: None, } } @@ -141,23 +230,23 @@ where } /// Use the given optional `cache` for the to be configured [`TrieBackend`]. - #[cfg(feature = "std")] pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder { TrieBackendBuilder { cache, root: self.root, storage: self.storage, + #[cfg(feature = "std")] recorder: self.recorder, } } /// Use the given `cache` for the to be configured [`TrieBackend`]. - #[cfg(feature = "std")] pub fn with_cache(self, cache: LC) -> TrieBackendBuilder { TrieBackendBuilder { cache: Some(cache), root: self.root, storage: self.storage, + #[cfg(feature = "std")] recorder: self.recorder, } } @@ -179,10 +268,8 @@ where /// Build the configured [`TrieBackend`]. #[cfg(not(feature = "std"))] pub fn build(self) -> TrieBackend { - let _ = self.cache; - TrieBackend { - essence: TrieBackendEssence::new(self.storage, self.root), + essence: TrieBackendEssence::new_with_cache(self.storage, self.root, self.cache), next_storage_key_cache: Default::default(), } } @@ -223,12 +310,13 @@ fn access_cache(cell: &CacheCell, callback: impl FnOnce(&mut T) -> R) - } /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. 
-pub struct TrieBackend, H: Hasher, C = LocalTrieCache> { +pub struct TrieBackend, H: Hasher, C = DefaultCache> { pub(crate) essence: TrieBackendEssence, next_storage_key_cache: CacheCell>>, } -impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> TrieBackend +impl, H: Hasher, C: TrieCacheProvider + Send + Sync> + TrieBackend where H::Out: Codec, { @@ -276,7 +364,7 @@ where } } -impl, H: Hasher, C: AsLocalTrieCache> sp_std::fmt::Debug +impl, H: Hasher, C: TrieCacheProvider> sp_std::fmt::Debug for TrieBackend { fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { @@ -284,13 +372,12 @@ impl, H: Hasher, C: AsLocalTrieCache> sp_std::fmt::D } } -impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> Backend +impl, H: Hasher, C: TrieCacheProvider + Send + Sync> Backend for TrieBackend where H::Out: Ord + Codec, { type Error = crate::DefaultError; - type Transaction = S::Overlay; type TrieBackendStorage = S; type RawIter = crate::trie_backend_essence::RawIter; @@ -371,7 +458,7 @@ where &self, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, Self::Transaction) + ) -> (H::Out, PrefixedMemoryDB) where H::Out: Ord, { @@ -383,7 +470,7 @@ where child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, bool, Self::Transaction) + ) -> (H::Out, bool, PrefixedMemoryDB) where H::Out: Ord, { @@ -442,7 +529,7 @@ pub mod tests { use sp_trie::{ cache::{CacheSize, SharedTrieCache}, trie_types::{TrieDBBuilder, TrieDBMutBuilderV0, TrieDBMutBuilderV1}, - KeySpacedDBMut, PrefixedKey, PrefixedMemoryDB, Trie, TrieCache, TrieMut, + KeySpacedDBMut, PrefixedMemoryDB, Trie, TrieCache, TrieMut, }; use std::iter; use trie_db::NodeCodec; @@ -1100,7 +1187,7 @@ pub mod tests { (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; - let in_memory = new_in_mem::>(); + let in_memory = new_in_mem::(); let in_memory = 
in_memory.update(contents, state_version); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory @@ -1205,7 +1292,7 @@ pub mod tests { .collect(), ), ]; - let in_memory = new_in_mem::>(); + let in_memory = new_in_mem::(); let in_memory = in_memory.update(contents, state_version); let child_storage_keys = vec![child_info_1.to_owned()]; let in_memory_root = in_memory @@ -1393,7 +1480,7 @@ pub mod tests { (Some(child_info_1.clone()), vec![(key.clone(), Some(child_trie_1_val.clone()))]), (Some(child_info_2.clone()), vec![(key.clone(), Some(child_trie_2_val.clone()))]), ]; - let in_memory = new_in_mem::>(); + let in_memory = new_in_mem::(); let in_memory = in_memory.update(contents, state_version); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 1f6d71b2dce80..4bb51f4a13437 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -19,8 +19,8 @@ //! from storage. use crate::{ - backend::{Consolidate, IterArgs, StorageIterator}, - trie_backend::AsLocalTrieCache, + backend::{IterArgs, StorageIterator}, + trie_backend::TrieCacheProvider, warn, StorageKey, StorageValue, }; use codec::Codec; @@ -35,11 +35,11 @@ use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_hash, read_child_trie_value, read_trie_value, trie_types::{TrieDBBuilder, TrieError}, - DBValue, KeySpacedDB, NodeCodec, Trie, TrieCache, TrieDBRawIterator, TrieRecorder, + DBValue, KeySpacedDB, NodeCodec, PrefixedMemoryDB, Trie, TrieCache, TrieDBRawIterator, + TrieRecorder, }; #[cfg(feature = "std")] use std::{collections::HashMap, sync::Arc}; - // In this module, we only use layout for read operation and empty root, // where V1 and V0 are equivalent. 
use sp_trie::LayoutV1 as Layout; @@ -100,7 +100,7 @@ where H: Hasher, S: TrieBackendStorage, H::Out: Codec + Ord, - C: AsLocalTrieCache + Send + Sync, + C: TrieCacheProvider + Send + Sync, { #[inline] fn prepare( @@ -160,7 +160,7 @@ where H: Hasher, S: TrieBackendStorage, H::Out: Codec + Ord, - C: AsLocalTrieCache + Send + Sync, + C: TrieCacheProvider + Send + Sync, { type Backend = crate::TrieBackend; type Error = crate::DefaultError; @@ -209,29 +209,28 @@ pub struct TrieBackendEssence, H: Hasher, C> { empty: H::Out, #[cfg(feature = "std")] pub(crate) cache: Arc>>, - #[cfg(feature = "std")] pub(crate) trie_node_cache: Option, #[cfg(feature = "std")] pub(crate) recorder: Option>, - #[cfg(not(feature = "std"))] - _phantom: PhantomData, } impl, H: Hasher, C> TrieBackendEssence { /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { + Self::new_with_cache(storage, root, None) + } + + /// Create new trie-based backend. + pub fn new_with_cache(storage: S, root: H::Out, cache: Option) -> Self { TrieBackendEssence { storage, root, empty: H::hash(&[0u8]), #[cfg(feature = "std")] cache: Arc::new(RwLock::new(Cache::new())), - #[cfg(feature = "std")] - trie_node_cache: None, + trie_node_cache: cache, #[cfg(feature = "std")] recorder: None, - #[cfg(not(feature = "std"))] - _phantom: PhantomData, } } @@ -289,11 +288,10 @@ impl, H: Hasher, C> TrieBackendEssence { } } -impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEssence { +impl, H: Hasher, C: TrieCacheProvider> TrieBackendEssence { /// Call the given closure passing it the recorder and the cache. /// /// If the given `storage_root` is `None`, `self.root` will be used. 
- #[cfg(feature = "std")] #[inline] fn with_recorder_and_cache( &self, @@ -304,32 +302,23 @@ impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEss ) -> R, ) -> R { let storage_root = storage_root.unwrap_or_else(|| self.root); - let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); - let recorder = match recorder.as_mut() { - Some(recorder) => Some(recorder as &mut dyn TrieRecorder), - None => None, - }; - - let mut cache = self - .trie_node_cache - .as_ref() - .map(|c| c.as_local_trie_cache().as_trie_db_cache(storage_root)); + let mut cache = self.trie_node_cache.as_ref().map(|c| c.as_trie_db_cache(storage_root)); let cache = cache.as_mut().map(|c| c as _); - callback(recorder, cache) - } + #[cfg(feature = "std")] + { + let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); + let recorder = match recorder.as_mut() { + Some(recorder) => Some(recorder as &mut dyn TrieRecorder), + None => None, + }; + callback(recorder, cache) + } - #[cfg(not(feature = "std"))] - #[inline] - fn with_recorder_and_cache( - &self, - _: Option, - callback: impl FnOnce( - Option<&mut dyn TrieRecorder>, - Option<&mut dyn TrieCache>>, - ) -> R, - ) -> R { - callback(None, None) + #[cfg(not(feature = "std"))] + { + callback(None, cache) + } } /// Call the given closure passing it the recorder and the cache. 
@@ -356,12 +345,12 @@ impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEss }; let result = if let Some(local_cache) = self.trie_node_cache.as_ref() { - let mut cache = local_cache.as_local_trie_cache().as_trie_db_mut_cache(); + let mut cache = local_cache.as_trie_db_mut_cache(); let (new_root, r) = callback(recorder, Some(&mut cache)); if let Some(new_root) = new_root { - cache.merge_into(local_cache.as_local_trie_cache(), new_root); + local_cache.merge(cache, new_root); } r @@ -375,17 +364,29 @@ impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEss #[cfg(not(feature = "std"))] fn with_recorder_and_cache_for_storage_root( &self, - _: Option, + _storage_root: Option, callback: impl FnOnce( Option<&mut dyn TrieRecorder>, Option<&mut dyn TrieCache>>, ) -> (Option, R), ) -> R { - callback(None, None).1 + if let Some(local_cache) = self.trie_node_cache.as_ref() { + let mut cache = local_cache.as_trie_db_mut_cache(); + + let (new_root, r) = callback(None, Some(&mut cache)); + + if let Some(new_root) = new_root { + local_cache.merge(cache, new_root); + } + + r + } else { + callback(None, None).1 + } } } -impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> +impl, H: Hasher, C: TrieCacheProvider + Send + Sync> TrieBackendEssence where H::Out: Codec + Ord, @@ -621,8 +622,8 @@ where &self, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, S::Overlay) { - let mut write_overlay = S::Overlay::default(); + ) -> (H::Out, PrefixedMemoryDB) { + let mut write_overlay = PrefixedMemoryDB::default(); let root = self.with_recorder_and_cache_for_storage_root(None, |recorder, cache| { let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); @@ -654,11 +655,11 @@ where child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, bool, S::Overlay) { + ) -> (H::Out, bool, PrefixedMemoryDB) { let default_root = match child_info.child_type() { ChildType::ParentKeyId => empty_child_trie_root::>(), }; - let mut write_overlay = 
S::Overlay::default(); + let mut write_overlay = PrefixedMemoryDB::default(); let child_root = match self.child_root(child_info) { Ok(Some(hash)) => hash, Ok(None) => default_root, @@ -707,7 +708,7 @@ where pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { storage: &'a S, - overlay: &'a mut S::Overlay, + overlay: &'a mut PrefixedMemoryDB, } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> AsHashDB @@ -722,7 +723,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> AsHashDB } impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { - pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self { + pub fn new(storage: &'a S, overlay: &'a mut PrefixedMemoryDB) -> Self { Ephemeral { storage, overlay } } } @@ -768,16 +769,11 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDBRef for Eph /// Key-value pairs storage that is used by trie backend essence. pub trait TrieBackendStorage: Send + Sync { - /// Type of in-memory overlay. - type Overlay: HashDB + Default + Consolidate; - /// Get the value stored at key. fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; } impl, H: Hasher> TrieBackendStorage for &T { - type Overlay = T::Overlay; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { (*self).get(key, prefix) } @@ -786,8 +782,6 @@ impl, H: Hasher> TrieBackendStorage for &T { // This implementation is used by normal storage trie clients. 
#[cfg(feature = "std")] impl TrieBackendStorage for Arc> { - type Overlay = sp_trie::PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { Storage::::get(std::ops::Deref::deref(self), key, prefix) } @@ -798,15 +792,13 @@ where H: Hasher, KF: sp_trie::KeyFunction + Send + Sync, { - type Overlay = Self; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { Ok(hash_db::HashDB::get(self, key, prefix)) } } -impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> AsHashDB - for TrieBackendEssence +impl, H: Hasher, C: TrieCacheProvider + Send + Sync> + AsHashDB for TrieBackendEssence { fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { self @@ -816,7 +808,7 @@ impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> } } -impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> HashDB +impl, H: Hasher, C: TrieCacheProvider + Send + Sync> HashDB for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { @@ -849,7 +841,7 @@ impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> } } -impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> +impl, H: Hasher, C: TrieCacheProvider + Send + Sync> HashDBRef for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { diff --git a/primitives/statement-store/Cargo.toml b/primitives/statement-store/Cargo.toml index 5aa4d833637cf..c3e620ab22c2d 100644 --- a/primitives/statement-store/Cargo.toml +++ b/primitives/statement-store/Cargo.toml @@ -13,17 +13,25 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-runtime = { version = "7.0.0", default-features = false, path = 
"../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../application-crypto" } -sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../runtime-interface" } -sp-externalities = { version = "0.13.0", default-features = false, path = "../externalities" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../application-crypto" } +sp-runtime-interface = { version = "17.0.0", default-features = false, path = "../runtime-interface" } +sp-externalities = { version = "0.19.0", default-features = false, path = "../externalities" } thiserror = { version = "1.0", optional = true } -log = { version = "0.4.17", optional = true } + +# ECIES dependencies +ed25519-dalek = { version = "1.0", optional = true } +x25519-dalek = { version = "2.0.0-pre.1", optional = true } +curve25519-dalek = { version = "3.2", optional = true } +aes-gcm = { version = "0.10", optional = true } +hkdf = { version = "0.12.0", optional = true } +sha2 = { version = "0.10.7", optional = true } +rand = { version = "0.8.5", features = ["small_rng"], optional = true } [features] default = ["std"] @@ -37,5 +45,19 @@ std = [ "sp-api/std", "sp-application-crypto/std", "thiserror", - "log", + + "ed25519-dalek", + "x25519-dalek", + "curve25519-dalek", + "aes-gcm", + "hkdf", + "sha2", + "rand", + "sp-externalities/std" +] +serde = [ + "scale-info/serde", + "sp-application-crypto/serde", + "sp-core/serde", + "sp-runtime/serde", ] diff --git a/primitives/statement-store/src/ecies.rs b/primitives/statement-store/src/ecies.rs new file mode 
100644 index 0000000000000..4afd583e03c80 --- /dev/null +++ b/primitives/statement-store/src/ecies.rs @@ -0,0 +1,174 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// tag::description[] +//! ECIES encryption scheme using x25519 key exchange and AEAD. +// end::description[] + +use aes_gcm::{aead::Aead, AeadCore, KeyInit}; +use rand::rngs::OsRng; +use sha2::Digest; +use sp_core::crypto::Pair; + +/// x25519 secret key. +pub type SecretKey = x25519_dalek::StaticSecret; +/// x25519 public key. +pub type PublicKey = x25519_dalek::PublicKey; + +/// Encryption or decryption error. +#[derive(Debug, PartialEq, Eq, thiserror::Error)] +pub enum Error { + /// Generic AES encryption error. + #[error("Encryption error")] + Encryption, + /// Generic AES decryption error. + #[error("Decryption error")] + Decryption, + /// Error reading key data. Not enough data in the buffer. 
+ #[error("Bad cypher text")] + BadData, +} + +const NONCE_LEN: usize = 12; +const PK_LEN: usize = 32; +const AES_KEY_LEN: usize = 32; + +fn aes_encrypt(key: &[u8; AES_KEY_LEN], nonce: &[u8], plaintext: &[u8]) -> Result, Error> { + let enc = aes_gcm::Aes256Gcm::new(key.into()); + + enc.encrypt(nonce.into(), aes_gcm::aead::Payload { msg: plaintext, aad: b"" }) + .map_err(|_| Error::Encryption) +} + +fn aes_decrypt(key: &[u8; AES_KEY_LEN], nonce: &[u8], ciphertext: &[u8]) -> Result, Error> { + let dec = aes_gcm::Aes256Gcm::new(key.into()); + dec.decrypt(nonce.into(), aes_gcm::aead::Payload { msg: ciphertext, aad: b"" }) + .map_err(|_| Error::Decryption) +} + +fn kdf(shared_secret: &[u8]) -> [u8; AES_KEY_LEN] { + let hkdf = hkdf::Hkdf::::new(None, shared_secret); + let mut aes_key = [0u8; AES_KEY_LEN]; + hkdf.expand(b"", &mut aes_key) + .expect("There's always enough data for derivation. qed."); + aes_key +} + +/// Encrypt `plaintext` with the given public x25519 public key. Decryption can be performed with +/// the matching secret key. +pub fn encrypt_x25519(pk: &PublicKey, plaintext: &[u8]) -> Result, Error> { + let ephemeral_sk = x25519_dalek::StaticSecret::new(OsRng); + let ephemeral_pk = x25519_dalek::PublicKey::from(&ephemeral_sk); + + let mut shared_secret = ephemeral_sk.diffie_hellman(pk).to_bytes().to_vec(); + shared_secret.extend_from_slice(ephemeral_pk.as_bytes()); + + let aes_key = kdf(&shared_secret); + + let nonce = aes_gcm::Aes256Gcm::generate_nonce(OsRng); + let ciphertext = aes_encrypt(&aes_key, &nonce, plaintext)?; + + let mut out = Vec::with_capacity(ciphertext.len() + PK_LEN + NONCE_LEN); + out.extend_from_slice(ephemeral_pk.as_bytes()); + out.extend_from_slice(nonce.as_slice()); + out.extend_from_slice(ciphertext.as_slice()); + + Ok(out) +} + +/// Encrypt `plaintext` with the given ed25519 public key. Decryption can be performed with the +/// matching secret key. 
+pub fn encrypt_ed25519(pk: &sp_core::ed25519::Public, plaintext: &[u8]) -> Result, Error> { + let ed25519 = curve25519_dalek::edwards::CompressedEdwardsY(pk.0); + let x25519 = ed25519.decompress().ok_or(Error::BadData)?.to_montgomery(); + let montgomery = x25519_dalek::PublicKey::from(x25519.to_bytes()); + encrypt_x25519(&montgomery, plaintext) +} + +/// Decrypt with the given x25519 secret key. +pub fn decrypt_x25519(sk: &SecretKey, encrypted: &[u8]) -> Result, Error> { + if encrypted.len() < PK_LEN + NONCE_LEN { + return Err(Error::BadData) + } + let mut ephemeral_pk: [u8; PK_LEN] = Default::default(); + ephemeral_pk.copy_from_slice(&encrypted[0..PK_LEN]); + let ephemeral_pk = PublicKey::from(ephemeral_pk); + + let mut shared_secret = sk.diffie_hellman(&ephemeral_pk).to_bytes().to_vec(); + shared_secret.extend_from_slice(ephemeral_pk.as_bytes()); + + let aes_key = kdf(&shared_secret); + + let nonce = &encrypted[PK_LEN..PK_LEN + NONCE_LEN]; + aes_decrypt(&aes_key, &nonce, &encrypted[PK_LEN + NONCE_LEN..]) +} + +/// Decrypt with the given ed25519 key pair. 
+pub fn decrypt_ed25519(pair: &sp_core::ed25519::Pair, encrypted: &[u8]) -> Result, Error> { + let raw = pair.to_raw_vec(); + let hash: [u8; 32] = sha2::Sha512::digest(&raw).as_slice()[..32] + .try_into() + .map_err(|_| Error::Decryption)?; + let secret = x25519_dalek::StaticSecret::from(hash); + decrypt_x25519(&secret, encrypted) +} + +#[cfg(test)] +mod test { + use super::*; + use rand::rngs::OsRng; + use sp_core::crypto::Pair; + + #[test] + fn basic_x25519_encryption() { + let sk = SecretKey::new(OsRng); + let pk = PublicKey::from(&sk); + + let plain_message = b"An important secret message"; + let encrypted = encrypt_x25519(&pk, plain_message).unwrap(); + + let decrypted = decrypt_x25519(&sk, &encrypted).unwrap(); + assert_eq!(plain_message, decrypted.as_slice()); + } + + #[test] + fn basic_ed25519_encryption() { + let (pair, _) = sp_core::ed25519::Pair::generate(); + let pk = pair.into(); + + let plain_message = b"An important secret message"; + let encrypted = encrypt_ed25519(&pk, plain_message).unwrap(); + + let decrypted = decrypt_ed25519(&pair, &encrypted).unwrap(); + assert_eq!(plain_message, decrypted.as_slice()); + } + + #[test] + fn fails_on_bad_data() { + let sk = SecretKey::new(OsRng); + let pk = PublicKey::from(&sk); + + let plain_message = b"An important secret message"; + let encrypted = encrypt_x25519(&pk, plain_message).unwrap(); + + assert_eq!(decrypt_x25519(&sk, &[]), Err(Error::BadData)); + assert_eq!( + decrypt_x25519(&sk, &encrypted[0..super::PK_LEN + super::NONCE_LEN - 1]), + Err(Error::BadData) + ); + } +} diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index e5c642d24e2b3..67e7a7b3896b5 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -49,6 +49,8 @@ pub use store_api::{ Error, NetworkPriority, Result, StatementSource, StatementStore, SubmitResult, }; +#[cfg(feature = "std")] +mod ecies; pub mod runtime_api; #[cfg(feature = "std")] mod store_api; @@ 
-61,12 +63,17 @@ mod sr25519 { pub type Public = app_sr25519::Public; } -mod ed25519 { +/// Statement-store specific ed25519 crypto primitives. +pub mod ed25519 { mod app_ed25519 { use sp_application_crypto::{app_crypto, ed25519, key_types::STATEMENT}; app_crypto!(ed25519, STATEMENT); } + /// Statement-store specific ed25519 public key. pub type Public = app_ed25519::Public; + /// Statement-store specific ed25519 key pair. + #[cfg(feature = "std")] + pub type Pair = app_ed25519::Pair; } mod ecdsa { @@ -507,6 +514,28 @@ impl Statement { } output } + + /// Encrypt give data with given key and store both in the statements. + #[cfg(feature = "std")] + pub fn encrypt( + &mut self, + data: &[u8], + key: &sp_core::ed25519::Public, + ) -> core::result::Result<(), ecies::Error> { + let encrypted = ecies::encrypt_ed25519(key, data)?; + self.data = Some(encrypted); + self.decryption_key = Some((*key).into()); + Ok(()) + } + + /// Decrypt data (if any) with the given private key. + #[cfg(feature = "std")] + pub fn decrypt_private( + &self, + key: &sp_core::ed25519::Pair, + ) -> core::result::Result>, ecies::Error> { + self.data.as_ref().map(|d| ecies::decrypt_ed25519(key, d)).transpose() + } } #[cfg(test)] @@ -615,4 +644,18 @@ mod test { statement.remove_proof(); assert_eq!(statement.verify_signature(), SignatureVerificationResult::NoSignature); } + + #[test] + fn encrypt_decrypt() { + let mut statement = Statement::new(); + let (pair, _) = sp_core::ed25519::Pair::generate(); + let plain = b"test data".to_vec(); + + //let sr25519_kp = sp_core::sr25519::Pair::from_string("//Alice", None).unwrap(); + statement.encrypt(&plain, &pair.public()).unwrap(); + assert_ne!(plain.as_slice(), statement.data().unwrap().as_slice()); + + let decrypted = statement.decrypt_private(&pair).unwrap(); + assert_eq!(decrypted, Some(plain)); + } } diff --git a/primitives/std/Cargo.toml b/primitives/std/Cargo.toml index 87ab1a46d8777..d16b15f0e6189 100644 --- a/primitives/std/Cargo.toml +++ 
b/primitives/std/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-std" -version = "5.0.0" +version = "8.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index c8271669f6721..412a2b15e0599 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-storage" -version = "7.0.0" +version = "13.0.0" authors = ["Parity Technologies "] edition = "2021" description = "Storage related primitives" @@ -14,12 +14,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", optional = true, default-features = false } ref-cast = "1.0.0" -serde = { version = "1.0.136", default-features = false, features = ["derive", "alloc"], optional = true } -sp-debug-derive = { version = "5.0.0", default-features = false, path = "../debug-derive" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +serde = { version = "1.0.163", default-features = false, features = ["derive", "alloc"], optional = true } +sp-debug-derive = { version = "8.0.0", default-features = false, path = "../debug-derive" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } [features] default = [ "std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index aa1bc8e305c9b..f8dc40f051c21 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -205,6 +205,8 @@ pub mod well_known_keys { pub const EXTRINSIC_INDEX: &[u8] = b":extrinsic_index"; /// Current intra-block entropy (a universally unique `[u8; 32]` value) is stored here. + /// + /// Encodes to `0x3a696e747261626c6f636b5f656e74726f7079`. 
pub const INTRABLOCK_ENTROPY: &[u8] = b":intrablock_entropy"; /// Prefix of child storage keys. diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 77df69b9d1455..8a1ae4d01e405 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -12,13 +12,13 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", default-features = false, features = ["derive"], optional = true } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../application-crypto" } -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +serde = { version = "1.0.163", default-features = false, features = ["derive"], optional = true } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../application-crypto" } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } [features] default = [ diff --git a/primitives/test-primitives/src/lib.rs b/primitives/test-primitives/src/lib.rs index 913cb762d92a5..82bdb6967b842 100644 --- a/primitives/test-primitives/src/lib.rs +++ b/primitives/test-primitives/src/lib.rs @@ -71,7 +71,7 @@ pub type Hash = H256; /// The block number type used in this runtime. pub type BlockNumber = u64; /// Index of a transaction. 
-pub type Index = u64; +pub type Nonce = u64; /// The item of a block digest. pub type DigestItem = sp_runtime::generic::DigestItem; /// The digest of a block. diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 27e306040f654..731a6a1b5d143 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -14,21 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.57", optional = true } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -futures-timer = { version = "3.0.2", optional = true } -log = { version = "0.4.17", optional = true } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } thiserror = { version = "1.0.30", optional = true } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../inherents" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } [features] default = [ "std" ] std = [ "async-trait", "codec/std", - "futures-timer", - "log", "sp-inherents/std", "sp-runtime/std", "sp-std/std", diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index 602f83c7b4939..1ef0a0e73ac14 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-tracing" -version = "6.0.0" +version = "10.0.0" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2021" @@ -18,8 +18,8 @@ features = ["with-tracing"] targets = ["x86_64-unknown-linux-gnu", "wasm32-unknown-unknown"] [dependencies] -sp-std = { version = "5.0.0", path = "../std", default-features = false } -codec = { version = "3.2.2", 
package = "parity-scale-codec", default-features = false, features = [ +sp-std = { version = "8.0.0", path = "../std", default-features = false } +codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = [ "derive", ] } tracing = { version = "0.1.29", default-features = false } diff --git a/primitives/transaction-pool/Cargo.toml b/primitives/transaction-pool/Cargo.toml index 63b34a10cd09f..d65db6a1d32fe 100644 --- a/primitives/transaction-pool/Cargo.toml +++ b/primitives/transaction-pool/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../runtime" } [features] default = [ "std" ] diff --git a/primitives/transaction-storage-proof/Cargo.toml b/primitives/transaction-storage-proof/Cargo.toml index 72d3175a577cf..b687f63c2fe2d 100644 --- a/primitives/transaction-storage-proof/Cargo.toml +++ b/primitives/transaction-storage-proof/Cargo.toml @@ -14,25 +14,23 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.57", optional = true } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -log = { version = "0.4.17", optional = true } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -sp-core = { version = "7.0.0", optional = true, path = "../core" } +sp-core = { version = "21.0.0", optional = true, path = "../core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../inherents" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "5.0.0", default-features = 
false, path = "../std" } -sp-trie = { version = "7.0.0", optional = true, path = "../trie" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } +sp-trie = { version = "22.0.0", optional = true, path = "../trie" } [features] default = [ "std" ] std = [ "async-trait", "codec/std", - "log", "scale-info/std", - "sp-core", "sp-inherents/std", "sp-runtime/std", "sp-std/std", "sp-trie/std", + "sp-core/std" ] diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index be02303003fd0..546d6786fc632 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-trie" -version = "7.0.0" +version = "22.0.0" authors = ["Parity Technologies "] description = "Patricia trie stuff using a parity-scale-codec node format" repository = "https://github.com/paritytech/substrate/" @@ -19,7 +19,7 @@ harness = false [dependencies] ahash = { version = "0.8.2", optional = true } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } hashbrown = { version = "0.13.2", optional = true } hash-db = { version = "0.16.0", default-features = false } lazy_static = { version = "1.4.0", optional = true } @@ -31,16 +31,16 @@ thiserror = { version = "1.0.30", optional = true } tracing = { version = "0.1.29", optional = true } trie-db = { version = "0.27.0", default-features = false } trie-root = { version = "0.18.0", default-features = false } -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } schnellru = { version = "0.2.1", optional = true } [dev-dependencies] 
-array-bytes = "4.1" +array-bytes = "6.1" criterion = "0.4.0" trie-bench = "0.37.0" trie-standardmap = "0.16.0" -sp-runtime = { version = "7.0.0", path = "../runtime" } +sp-runtime = { version = "24.0.0", path = "../runtime" } [features] default = ["std"] @@ -61,4 +61,5 @@ std = [ "tracing", "trie-db/std", "trie-root/std", + "sp-runtime/std" ] diff --git a/primitives/trie/src/cache/mod.rs b/primitives/trie/src/cache/mod.rs index 0100e2876e9a9..01f08a78adcf2 100644 --- a/primitives/trie/src/cache/mod.rs +++ b/primitives/trie/src/cache/mod.rs @@ -91,9 +91,13 @@ const LOCAL_NODE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024; const LOCAL_VALUE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024; /// The maximum size of the memory allocated on the heap by the local cache, in bytes. -const LOCAL_NODE_CACHE_MAX_HEAP_SIZE: usize = 2 * 1024 * 1024; +/// +/// The size of the node cache should always be bigger than the value cache. The value +/// cache is only holding weak references to the actual values found in the nodes and +/// we account for the size of the node as part of the node cache. +const LOCAL_NODE_CACHE_MAX_HEAP_SIZE: usize = 8 * 1024 * 1024; /// Same as [`LOCAL_NODE_CACHE_MAX_HEAP_SIZE`]. -const LOCAL_VALUE_CACHE_MAX_HEAP_SIZE: usize = 4 * 1024 * 1024; +const LOCAL_VALUE_CACHE_MAX_HEAP_SIZE: usize = 2 * 1024 * 1024; /// The size of the shared cache. #[derive(Debug, Clone, Copy)] @@ -525,7 +529,7 @@ impl<'a, H: Hasher> TrieCache<'a, H> { /// `storage_root` is the new storage root that was obtained after finishing all operations /// using the [`TrieDBMut`](trie_db::TrieDBMut). 
pub fn merge_into(self, local: &LocalTrieCache, storage_root: H::Out) { - let cache = if let ValueCache::Fresh(cache) = self.value_cache { cache } else { return }; + let ValueCache::Fresh(cache) = self.value_cache else { return }; if !cache.is_empty() { let mut value_cache = local.value_cache.lock(); diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index 9cebb8f01d82e..fc59a367ba385 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-version" -version = "5.0.0" +version = "22.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -14,16 +14,16 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", default-features = false, optional = true } parity-wasm = { version = "0.45", optional = true } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.163", default-features = false, features = ["derive", "alloc"], optional = true } thiserror = { version = "1.0.30", optional = true } -sp-core-hashing-proc-macro = { version = "5.0.0", path = "../core/hashing/proc-macro" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } -sp-version-proc-macro = { version = "4.0.0-dev", default-features = false, path = "proc-macro" } +sp-core-hashing-proc-macro = { version = "9.0.0", path = "../core/hashing/proc-macro" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../runtime" } +sp-std = { version = 
"8.0.0", default-features = false, path = "../std" } +sp-version-proc-macro = { version = "8.0.0", default-features = false, path = "proc-macro" } [features] default = ["std"] diff --git a/primitives/version/proc-macro/Cargo.toml b/primitives/version/proc-macro/Cargo.toml index aac41dd43dd08..33c14c9e715e4 100644 --- a/primitives/version/proc-macro/Cargo.toml +++ b/primitives/version/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-version-proc-macro" -version = "4.0.0-dev" +version = "8.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive" ] } proc-macro2 = "1.0.56" -quote = "1.0.26" -syn = { version = "2.0.14", features = ["full", "fold", "extra-traits", "visit"] } +quote = "1.0.28" +syn = { version = "2.0.16", features = ["full", "fold", "extra-traits", "visit"] } [dev-dependencies] -sp-version = { version = "5.0.0", path = ".." } +sp-version = { version = "22.0.0", path = ".." 
} diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index c45ea75830ef7..50efd6d9d22d7 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-wasm-interface" -version = "7.0.0" +version = "14.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -14,15 +14,14 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", optional = true } -wasmi = { version = "0.13.2", optional = true } -wasmtime = { version = "6.0.2", default-features = false, optional = true } +wasmtime = { version = "8.0.1", default-features = false, optional = true } anyhow = { version = "1.0.68", optional = true } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } [features] -default = [ "std" ] -std = [ "codec/std", "log", "sp-std/std", "wasmi", "wasmtime" ] -wasmtime = [ "dep:wasmtime", "anyhow" ] +default = ["std"] +std = ["codec/std", "log", "sp-std/std", "wasmtime"] +wasmtime = ["dep:wasmtime", "anyhow"] diff --git a/primitives/wasm-interface/src/lib.rs b/primitives/wasm-interface/src/lib.rs index b096d236c01eb..9d5d2bb358d57 100644 --- a/primitives/wasm-interface/src/lib.rs +++ b/primitives/wasm-interface/src/lib.rs @@ -21,9 +21,6 @@ use sp_std::{borrow::Cow, iter::Iterator, marker::PhantomData, mem, result, vec, vec::Vec}; -#[cfg(feature = "wasmi")] -mod wasmi_impl; - #[cfg(not(all(feature = "std", feature = "wasmtime")))] #[macro_export] macro_rules! 
if_wasmtime_is_enabled { diff --git a/primitives/wasm-interface/src/wasmi_impl.rs b/primitives/wasm-interface/src/wasmi_impl.rs deleted file mode 100644 index 7394e34551305..0000000000000 --- a/primitives/wasm-interface/src/wasmi_impl.rs +++ /dev/null @@ -1,80 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of conversions between Substrate and wasmi types. 
-use crate::{Signature, Value, ValueType}; -use sp_std::vec::Vec; - -impl From for wasmi::RuntimeValue { - fn from(value: Value) -> Self { - match value { - Value::I32(val) => Self::I32(val), - Value::I64(val) => Self::I64(val), - Value::F32(val) => Self::F32(val.into()), - Value::F64(val) => Self::F64(val.into()), - } - } -} - -impl From for Value { - fn from(value: wasmi::RuntimeValue) -> Self { - match value { - wasmi::RuntimeValue::I32(val) => Self::I32(val), - wasmi::RuntimeValue::I64(val) => Self::I64(val), - wasmi::RuntimeValue::F32(val) => Self::F32(val.into()), - wasmi::RuntimeValue::F64(val) => Self::F64(val.into()), - } - } -} - -impl From for wasmi::ValueType { - fn from(value: ValueType) -> Self { - match value { - ValueType::I32 => Self::I32, - ValueType::I64 => Self::I64, - ValueType::F32 => Self::F32, - ValueType::F64 => Self::F64, - } - } -} - -impl From for ValueType { - fn from(value: wasmi::ValueType) -> Self { - match value { - wasmi::ValueType::I32 => Self::I32, - wasmi::ValueType::I64 => Self::I64, - wasmi::ValueType::F32 => Self::F32, - wasmi::ValueType::F64 => Self::F64, - } - } -} - -impl From for wasmi::Signature { - fn from(sig: Signature) -> Self { - let args = sig.args.iter().map(|a| (*a).into()).collect::>(); - wasmi::Signature::new(args, sig.return_value.map(Into::into)) - } -} - -impl From<&wasmi::Signature> for Signature { - fn from(sig: &wasmi::Signature) -> Self { - Signature::new( - sig.params().iter().copied().map(Into::into).collect::>(), - sig.return_type().map(Into::into), - ) - } -} diff --git a/primitives/weights/Cargo.toml b/primitives/weights/Cargo.toml index 534d790884794..05fc58a10e105 100644 --- a/primitives/weights/Cargo.toml +++ b/primitives/weights/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-weights" -version = "4.0.0" +version = "20.0.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -13,14 +13,14 @@ documentation = "https://docs.rs/sp-wasm-interface" targets = 
["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", default-features = false, optional = true, features = ["derive", "alloc"] } -smallvec = "1.8.0" -sp-arithmetic = { version = "6.0.0", default-features = false, path = "../arithmetic" } -sp-core = { version = "7.0.0", default-features = false, path = "../core" } -sp-debug-derive = { version = "5.0.0", default-features = false, path = "../debug-derive" } -sp-std = { version = "5.0.0", default-features = false, path = "../std" } +serde = { version = "1.0.163", default-features = false, optional = true, features = ["derive", "alloc"] } +smallvec = "1.11.0" +sp-arithmetic = { version = "16.0.0", default-features = false, path = "../arithmetic" } +sp-core = { version = "21.0.0", default-features = false, path = "../core" } +sp-debug-derive = { version = "8.0.0", default-features = false, path = "../debug-derive" } +sp-std = { version = "8.0.0", default-features = false, path = "../std" } [features] default = [ "std" ] diff --git a/primitives/weights/src/weight_meter.rs b/primitives/weights/src/weight_meter.rs index ab7b6c63ed383..3b0b21ea8799a 100644 --- a/primitives/weights/src/weight_meter.rs +++ b/primitives/weights/src/weight_meter.rs @@ -32,18 +32,21 @@ use sp_arithmetic::Perbill; /// /// // The weight is limited to (10, 0). /// let mut meter = WeightMeter::from_limit(Weight::from_parts(10, 0)); -/// // There is enough weight remaining for an operation with (5, 0) weight. -/// assert!(meter.check_accrue(Weight::from_parts(5, 0))); -/// // There is not enough weight remaining for an operation with (6, 0) weight. 
-/// assert!(!meter.check_accrue(Weight::from_parts(6, 0))); +/// // There is enough weight remaining for an operation with (6, 0) weight. +/// assert!(meter.try_consume(Weight::from_parts(6, 0)).is_ok()); +/// assert_eq!(meter.remaining(), Weight::from_parts(4, 0)); +/// // There is not enough weight remaining for an operation with (5, 0) weight. +/// assert!(!meter.try_consume(Weight::from_parts(5, 0)).is_ok()); +/// // The total limit is obviously unchanged: +/// assert_eq!(meter.limit(), Weight::from_parts(10, 0)); /// ``` #[derive(Debug, Clone)] pub struct WeightMeter { /// The already consumed weight. - pub consumed: Weight, + consumed: Weight, /// The maximal consumable weight. - pub limit: Weight, + limit: Weight, } impl WeightMeter { @@ -57,6 +60,16 @@ impl WeightMeter { Self::from_limit(Weight::MAX) } + /// The already consumed weight. + pub fn consumed(&self) -> Weight { + self.consumed + } + + /// The limit can ever be accrued. + pub fn limit(&self) -> Weight { + self.limit + } + /// The remaining weight that can still be consumed. pub fn remaining(&self) -> Weight { self.limit.saturating_sub(self.consumed) @@ -65,6 +78,28 @@ impl WeightMeter { /// The ratio of consumed weight to the limit. /// /// Calculates one ratio per component and returns the largest. 
+ /// + /// # Example + /// ```rust + /// use sp_weights::{Weight, WeightMeter}; + /// use sp_arithmetic::Perbill; + /// + /// let mut meter = WeightMeter::from_limit(Weight::from_parts(10, 20)); + /// // Nothing consumed so far: + /// assert_eq!(meter.consumed_ratio(), Perbill::from_percent(0)); + /// meter.consume(Weight::from_parts(5, 5)); + /// // The ref-time is the larger ratio: + /// assert_eq!(meter.consumed_ratio(), Perbill::from_percent(50)); + /// meter.consume(Weight::from_parts(1, 10)); + /// // Now the larger ratio is proof-size: + /// assert_eq!(meter.consumed_ratio(), Perbill::from_percent(75)); + /// // Eventually it reaches 100%: + /// meter.consume(Weight::from_parts(4, 0)); + /// assert_eq!(meter.consumed_ratio(), Perbill::from_percent(100)); + /// // Saturating the second component won't change anything anymore: + /// meter.consume(Weight::from_parts(0, 5)); + /// assert_eq!(meter.consumed_ratio(), Perbill::from_percent(100)); + /// ``` pub fn consumed_ratio(&self) -> Perbill { let time = Perbill::from_rational(self.consumed.ref_time(), self.limit.ref_time()); let pov = Perbill::from_rational(self.consumed.proof_size(), self.limit.proof_size()); @@ -72,25 +107,46 @@ impl WeightMeter { } /// Consume some weight and defensively fail if it is over the limit. Saturate in any case. + #[deprecated(note = "Use `consume` instead. Will be removed after December 2023.")] pub fn defensive_saturating_accrue(&mut self, w: Weight) { + self.consume(w); + } + + /// Consume some weight and defensively fail if it is over the limit. Saturate in any case. + pub fn consume(&mut self, w: Weight) { self.consumed.saturating_accrue(w); debug_assert!(self.consumed.all_lte(self.limit), "Weight counter overflow"); } - /// Consume the given weight after checking that it can be consumed. Otherwise do nothing. + /// Consume the given weight after checking that it can be consumed and return `true`. Otherwise + /// do nothing and return `false`. 
+ #[deprecated(note = "Use `try_consume` instead. Will be removed after December 2023.")] pub fn check_accrue(&mut self, w: Weight) -> bool { - self.consumed.checked_add(&w).map_or(false, |test| { + self.try_consume(w).is_ok() + } + + /// Consume the given weight after checking that it can be consumed. + /// + /// Returns `Ok` if the weight can be consumed or otherwise an `Err`. + pub fn try_consume(&mut self, w: Weight) -> Result<(), ()> { + self.consumed.checked_add(&w).map_or(Err(()), |test| { if test.any_gt(self.limit) { - false + Err(()) } else { self.consumed = test; - true + Ok(()) } }) } /// Check if the given weight can be consumed. + #[deprecated(note = "Use `can_consume` instead. Will be removed after December 2023.")] pub fn can_accrue(&self, w: Weight) -> bool { + self.can_consume(w) + } + + /// Check if the given weight can be consumed. + pub fn can_consume(&self, w: Weight) -> bool { self.consumed.checked_add(&w).map_or(false, |t| t.all_lte(self.limit)) } } @@ -98,6 +154,7 @@ impl WeightMeter { #[cfg(test)] mod tests { use crate::*; + use sp_arithmetic::traits::Zero; #[test] fn weight_meter_remaining_works() { @@ -179,4 +236,52 @@ mod tests { assert!(meter.check_accrue(Weight::from_parts(0, 4))); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(100)); } + + #[test] + fn try_consume_works() { + let mut meter = WeightMeter::from_limit(Weight::from_parts(10, 0)); + + assert!(meter.try_consume(Weight::from_parts(11, 0)).is_err()); + assert!(meter.consumed().is_zero(), "No modification"); + + assert!(meter.try_consume(Weight::from_parts(9, 0)).is_ok()); + assert!(meter.try_consume(Weight::from_parts(2, 0)).is_err()); + assert!(meter.try_consume(Weight::from_parts(1, 0)).is_ok()); + assert!(meter.remaining().is_zero()); + assert_eq!(meter.consumed(), Weight::from_parts(10, 0)); + } + + #[test] + fn can_consume_works() { + let mut meter = WeightMeter::from_limit(Weight::from_parts(10, 0)); + + assert!(!meter.can_consume(Weight::from_parts(11, 0))); 
+ assert!(meter.consumed().is_zero(), "No modification"); + + assert!(meter.can_consume(Weight::from_parts(9, 0))); + meter.consume(Weight::from_parts(9, 0)); + assert!(!meter.can_consume(Weight::from_parts(2, 0))); + assert!(meter.can_consume(Weight::from_parts(1, 0))); + } + + #[test] + #[cfg(debug_assertions)] + fn consume_works() { + let mut meter = WeightMeter::from_limit(Weight::from_parts(5, 10)); + + meter.consume(Weight::from_parts(4, 0)); + assert_eq!(meter.remaining(), Weight::from_parts(1, 10)); + meter.consume(Weight::from_parts(1, 0)); + assert_eq!(meter.remaining(), Weight::from_parts(0, 10)); + meter.consume(Weight::from_parts(0, 10)); + assert_eq!(meter.consumed(), Weight::from_parts(5, 10)); + } + + #[test] + #[cfg(debug_assertions)] + #[should_panic(expected = "Weight counter overflow")] + fn consume_defensive_fail() { + let mut meter = WeightMeter::from_limit(Weight::from_parts(10, 0)); + let _ = meter.consume(Weight::from_parts(11, 0)); + } } diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs index 2aede666dd3da..3946cfe42c8d0 100644 --- a/primitives/weights/src/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -97,18 +97,6 @@ impl Weight { } } - /// Construct [`Weight`] with reference time weight and 0 storage size weight. - #[deprecated = "Will be removed soon; use `from_parts` instead."] - pub const fn from_ref_time(ref_time: u64) -> Self { - Self { ref_time, proof_size: 0 } - } - - /// Construct [`Weight`] with storage size weight and 0 reference time weight. - #[deprecated = "Will be removed soon; use `from_parts` instead."] - pub const fn from_proof_size(proof_size: u64) -> Self { - Self { ref_time: 0, proof_size } - } - /// Construct [`Weight`] from weight parts, namely reference time and proof size weights. 
pub const fn from_parts(ref_time: u64, proof_size: u64) -> Self { Self { ref_time, proof_size } diff --git a/scripts/ci/common/lib.sh b/scripts/ci/common/lib.sh index ce6c566d799ab..08c2fe81ada04 100755 --- a/scripts/ci/common/lib.sh +++ b/scripts/ci/common/lib.sh @@ -99,7 +99,7 @@ structure_message() { # access_token: see https://matrix.org/docs/guides/client-server-api/ # Usage: send_message $body (json formatted) $room_id $access_token send_message() { -curl -XPOST -d "$1" "https://matrix.parity.io/_matrix/client/r0/rooms/$2/send/m.room.message?access_token=$3" + curl -XPOST -d "$1" "https://m.parity.io/_matrix/client/r0/rooms/$2/send/m.room.message?access_token=$3" } # Check for runtime changes between two commits. This is defined as any changes diff --git a/scripts/ci/deny.toml b/scripts/ci/deny.toml index 408a9e55bc40b..5297d07143c22 100644 --- a/scripts/ci/deny.toml +++ b/scripts/ci/deny.toml @@ -96,6 +96,7 @@ exceptions = [ { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool-api" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "subkey" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "substrate" }, ] # Some crates don't have (easily) machine readable licensing information, diff --git a/scripts/ci/gitlab/pipeline/build.yml b/scripts/ci/gitlab/pipeline/build.yml index 0c04abfd5aa30..8f63f6ecc3911 100644 --- a/scripts/ci/gitlab/pipeline/build.yml +++ b/scripts/ci/gitlab/pipeline/build.yml @@ -15,6 +15,7 @@ variables: RUSTFLAGS: "-D warnings" script: + - cargo install --locked --git https://github.com/paritytech/try-runtime-cli --rev a93c9b5abe5d31a4cf1936204f7e5c489184b521 - git clone --depth=1 --branch="$PIPELINE_SCRIPTS_TAG" @@ -26,10 +27,47 @@ --extra-dependencies "$EXTRA_DEPENDENCIES" --companion-overrides "$COMPANION_OVERRIDES" +.check-runtime-migration: + extends: + - .check-dependent-project + - 
.test-refs-no-trigger-prs-only + variables: + DEPENDENT_REPO: polkadot + COMPANION_OVERRIDES: | + substrate: polkadot-v* + polkadot: release-v* + COMPANION_CHECK_COMMAND: > + time cargo build --release -p "$NETWORK"-runtime --features try-runtime && + time try-runtime \ + --runtime ./target/release/wbuild/"$NETWORK"-runtime/target/wasm32-unknown-unknown/release/"$NETWORK"_runtime.wasm \ + on-runtime-upgrade --checks=pre-and-post live --uri wss://${NETWORK}-try-runtime-node.parity-chains.parity.io:443 + # Individual jobs are set up for each dependent project so that they can be ran in parallel. # Arguably we could generate a job for each companion in the PR's description using Gitlab's # parent-child pipelines but that's more complicated. +check-runtime-migration-polkadot: + extends: + - .check-runtime-migration + variables: + NETWORK: polkadot + +check-runtime-migration-kusama: + extends: .check-runtime-migration + variables: + NETWORK: kusama + +check-runtime-migration-rococo: + extends: .check-runtime-migration + variables: + NETWORK: rococo + allow_failure: true + +check-runtime-migration-westend: + extends: .check-runtime-migration + variables: + NETWORK: westend + check-dependent-polkadot: extends: .check-dependent-project variables: @@ -77,8 +115,8 @@ build-linux-substrate: - git checkout -B "$CI_COMMIT_REF_NAME" "$CI_COMMIT_SHA" script: - rusty-cachier snapshot create - - WASM_BUILD_NO_COLOR=1 time cargo build --locked --release --verbose - - mv $CARGO_TARGET_DIR/release/substrate ./artifacts/substrate/. 
+ - WASM_BUILD_NO_COLOR=1 time cargo build --locked --release -p node-cli --verbose + - mv $CARGO_TARGET_DIR/release/substrate-node ./artifacts/substrate/substrate - echo -n "Substrate version = " - if [ "${CI_COMMIT_TAG}" ]; then echo "${CI_COMMIT_TAG}" | tee ./artifacts/substrate/VERSION; @@ -153,7 +191,7 @@ build-rustdoc: - .test-refs variables: SKIP_WASM_BUILD: 1 - DOC_INDEX_PAGE: "sc_service/index.html" # default redirected page + DOC_INDEX_PAGE: "substrate/index.html" # default redirected page # this variable gets overriden by "rusty-cachier environment inject", use the value as default CARGO_TARGET_DIR: "$CI_PROJECT_DIR/target" artifacts: diff --git a/scripts/ci/gitlab/pipeline/check.yml b/scripts/ci/gitlab/pipeline/check.yml index a29f31d4aa3ba..576daec9b4331 100644 --- a/scripts/ci/gitlab/pipeline/check.yml +++ b/scripts/ci/gitlab/pipeline/check.yml @@ -47,6 +47,19 @@ test-rust-features: https://github.com/paritytech/pipeline-scripts - bash ./pipeline-scripts/rust-features.sh . +test-rust-feature-propagation: + stage: check + extends: + - .kubernetes-env + - .test-refs-no-trigger-prs-only + script: + - cargo install --locked --version 0.7.4 -q -f zepter && zepter --version + - echo "👉 Hello developer! If you see this CI check failing then it means that one of the crates is missing a feature for one of its dependencies. The output below tells you which feature needs to be added for which dependency to which crate. You can do this by modifying the Cargo.toml file. 
For more context see the MR where this check was introduced https://github.com/paritytech/substrate/pull/14660" + - zepter lint propagate-feature --feature try-runtime --left-side-feature-missing=ignore --workspace --feature-enables-dep="try-runtime:frame-try-runtime" --locked + - zepter lint propagate-feature --feature runtime-benchmarks --left-side-feature-missing=ignore --workspace --feature-enables-dep="runtime-benchmarks:frame-benchmarking" --locked + - zepter lint propagate-feature --feature std --left-side-feature-missing=ignore --workspace --locked + allow_failure: true # Experimental + test-prometheus-alerting-rules: stage: check extends: .kubernetes-env diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index f49e36c6a1057..61c1aa876f306 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -37,7 +37,7 @@ cargo-deny-licenses: - echo "___The complete log is in the artifacts___" - $CARGO_DENY_CMD 2> deny.log - if [ $CI_JOB_STATUS != 'success' ]; then - echo 'Please check license of your crate or add an exception to scripts/ci/deny.toml'; + echo 'Please check license of your crate or add an exception to scripts/ci/deny.toml'; fi artifacts: name: $CI_COMMIT_SHORT_SHA @@ -68,8 +68,10 @@ cargo-clippy: - .docker-env - .test-refs script: + - echo $RUSTFLAGS + - cargo version && cargo clippy --version - rusty-cachier snapshot create - - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo clippy --locked --all-targets + - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo clippy --locked --all-targets --workspace - rusty-cachier cache upload cargo-check-benches: @@ -90,15 +92,15 @@ cargo-check-benches: - !reference [.pipeline-stopper-vars, script] # merges in the master branch on PRs. 
skip if base is not master - 'if [ $CI_COMMIT_REF_NAME != "master" ]; then - BASE=$(curl -s -H "Authorization: Bearer ${GITHUB_PR_TOKEN}" https://api.github.com/repos/paritytech/substrate/pulls/${CI_COMMIT_REF_NAME} | jq -r .base.ref); - printf "Merging base branch %s\n" "${BASE:=master}"; - if [ $BASE != "master" ]; then - echo "$BASE is not master, skipping merge"; - else - git config user.email "ci@gitlab.parity.io"; - git fetch origin "refs/heads/${BASE}"; - git merge --verbose --no-edit FETCH_HEAD; - fi + BASE=$(curl -s -H "Authorization: Bearer ${GITHUB_PR_TOKEN}" https://api.github.com/repos/paritytech/substrate/pulls/${CI_COMMIT_REF_NAME} | jq -r .base.ref); + printf "Merging base branch %s\n" "${BASE:=master}"; + if [ $BASE != "master" ]; then + echo "$BASE is not master, skipping merge"; + else + git config user.email "ci@gitlab.parity.io"; + git fetch origin "refs/heads/${BASE}"; + git merge --verbose --no-edit FETCH_HEAD; + fi fi' parallel: 2 script: @@ -115,8 +117,8 @@ cargo-check-benches: rusty-cachier cache upload ;; 2) - cargo run --locked --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small --json - | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::native::sr25519::transfer_keep_alive::paritydb::small.json + cargo run --locked --release -p node-bench -- ::node::import::sr25519::transfer_keep_alive::paritydb::small --json + | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::sr25519::transfer_keep_alive::paritydb::small.json ;; esac @@ -155,21 +157,21 @@ node-bench-regression-guard: --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA" after_script: [""] -cargo-check-try-runtime: +cargo-check-try-runtime-and-experimental: stage: test extends: - .docker-env - .test-refs script: - rusty-cachier snapshot create - - time cargo check --locked --features try-runtime + - time cargo check --workspace --locked --features 
try-runtime,experimental - rusty-cachier cache upload test-deterministic-wasm: stage: test # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs needs: - - job: cargo-check-try-runtime + - job: cargo-check-try-runtime-and-experimental artifacts: false extends: - .docker-env @@ -221,7 +223,7 @@ test-linux-stable: --locked --release --verbose - --features runtime-benchmarks + --features runtime-benchmarks,try-runtime,experimental,unsafe-debug --manifest-path ./bin/node/cli/Cargo.toml --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL} # we need to update cache only from one job @@ -229,11 +231,12 @@ test-linux-stable: # Upload tests results to Elasticsearch - echo "Upload test results to Elasticsearch" - cat target/nextest/default/junit.xml | xq . > target/nextest/default/junit.json - - "curl -v -XPOST --http1.1 - -u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD} - https://elasticsearch.parity-build.parity.io/unit-tests/_doc/${CI_JOB_ID} - -H 'Content-Type: application/json' - -d @target/nextest/default/junit.json" + - | + curl -v -XPOST --http1.1 \ + -u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD} \ + https://elasticsearch.parity-build.parity.io/unit-tests/_doc/${CI_JOB_ID} \ + -H 'Content-Type: application/json' \ + -d @target/nextest/default/junit.json || echo "failed to upload junit report" artifacts: when: always paths: @@ -258,8 +261,8 @@ test-frame-support: script: - rusty-cachier snapshot create - cat /cargo_target_dir/debug/.fingerprint/memory_units-759eddf317490d2b/lib-memory_units.json || true - - time cargo test --verbose --locked -p frame-support-test --features=frame-feature-testing,no-metadata-docs,try-runtime --manifest-path ./frame/support/test/Cargo.toml - - time cargo test --verbose --locked -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs,try-runtime --manifest-path ./frame/support/test/Cargo.toml + - time cargo test --verbose --locked -p frame-support-test 
--features=frame-feature-testing,no-metadata-docs,try-runtime,experimental --manifest-path ./frame/support/test/Cargo.toml + - time cargo test --verbose --locked -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs,try-runtime,experimental --manifest-path ./frame/support/test/Cargo.toml - SUBSTRATE_TEST_TIMEOUT=1 time cargo test -p substrate-test-utils --release --verbose --locked -- --ignored timeout - cat /cargo_target_dir/debug/.fingerprint/memory_units-759eddf317490d2b/lib-memory_units.json || true - rusty-cachier cache upload @@ -304,7 +307,7 @@ quick-benchmarks: WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" script: - rusty-cachier snapshot create - - time cargo run --locked --release --features runtime-benchmarks -- benchmark pallet --execution wasm --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 + - time cargo run --locked --release -p node-cli --features runtime-benchmarks -- benchmark pallet --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 - rusty-cachier cache upload test-frame-examples-compile-to-wasm: @@ -456,6 +459,13 @@ cargo-hfuzz: --exit_code_upon_crash 1 --timeout 10 --run_time 60 + # use git version of honggfuzz-rs until v0.5.56 is out, we need a few recent changes: + # https://github.com/rust-fuzz/honggfuzz-rs/pull/75 to avoid breakage on debian + # https://github.com/rust-fuzz/honggfuzz-rs/pull/81 fix to the above pr + # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling rusty-cachier's absolute CARGO_TARGET_DIR + HFUZZ_BUILD_ARGS: > + --config=patch.crates-io.honggfuzz.git="https://github.com/altaua/honggfuzz-rs" + --config=patch.crates-io.honggfuzz.rev="205f7c8c059a0d98fe1cb912cdac84f324cb6981" artifacts: name: "hfuzz-$CI_COMMIT_SHORT_SHA" expire_in: 7 days @@ -463,15 +473,6 @@ cargo-hfuzz: paths: - primitives/arithmetic/fuzzer/hfuzz_workspace/ script: - # use git version of honggfuzz-rs until 
v0.5.56 is out, we need a few recent changes: - # https://github.com/rust-fuzz/honggfuzz-rs/pull/75 to avoid breakage on debian - # https://github.com/rust-fuzz/honggfuzz-rs/pull/81 fix to the above pr - # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling rusty-cachier's absolute CARGO_TARGET_DIR - - | - cat >>Cargo.toml < String { - let repo = git2::Repository::discover(path) - .expect(&format!("Node template ({}) should be in a git repository.", path.display())); - - let commit_id = repo - .head() - .expect("Repository should have a head") - .peel_to_commit() - .expect("Head references a commit") - .id(); + let mut dir = path; + while !dir.join(".git").exists() { + dir = dir + .parent() + .expect(&format!("Node template ({}) should be in a git repository.", path.display())); + } - format!("{}", commit_id) + let git = dir.join(".git"); + let head = git.join("HEAD"); + let head_contents = fs::read_to_string(head).expect("Repository should have a HEAD"); + let branch = head_contents.strip_prefix("ref: ").expect(".git/HEAD to start 'ref: '").trim(); + let mut commit = fs::read_to_string(git.join(branch)).expect("Head references a commit"); + commit.truncate(commit.trim_end().len()); + commit } /// Rewrites git dependencies: diff --git a/scripts/run_all_benchmarks.sh b/scripts/run_all_benchmarks.sh index 727b49e26afe5..83848100a7e51 100755 --- a/scripts/run_all_benchmarks.sh +++ b/scripts/run_all_benchmarks.sh @@ -119,7 +119,6 @@ for PALLET in "${PALLETS[@]}"; do --repeat=20 \ --pallet="$PALLET" \ --extrinsic="*" \ - --execution=wasm \ --wasm-execution=compiled \ --heap-pages=4096 \ --output="$WEIGHT_FILE" \ @@ -137,7 +136,6 @@ echo "[+] Benchmarking block and extrinsic overheads..." 
OUTPUT=$( $SUBSTRATE benchmark overhead \ --chain=dev \ - --execution=wasm \ --wasm-execution=compiled \ --weight-path="./frame/support/src/weights/" \ --header="./HEADER-APACHE2" \ diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000000000..16a6067789657 --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,297 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! # Substrate +//! +//! Substrate is a Rust framework for building blockchains in a modular and extensible way. While in +//! itself un-opinionated, it is the main engine behind the Polkadot ecosystem. +//! +//! [![github]](https://github.com/paritytech/substrate/) - [![polkadot]](https://polkadot.network) +//! +//! This crate in itself does not contain any code and is just meant to be a documentation hub for +//! substrate-based crates. +//! +//! ## Overview +//! +//! Substrate approaches blockchain development with an acknowledgement of a few self-evident +//! truths: +//! +//! 1. Society and technology evolve. +//! 2. Humans are fallible. +//! +//! This, specifically, makes the task of designing a correct, safe and long-lasting blockchain +//! system hard. +//! +//! Nonetheless, in order to achieve this goal, substrate embraces the following: +//! +//! 1. 
Use of **Rust** as a modern, and safe programming language, which limits human error through +//! various means, most notably memory safety. +//! 2. Substrate is written from the ground-up with a generic, modular and extensible design. This +//! ensures that software components can be easily swapped and upgraded. Examples of this are +//! multiple consensus mechanisms provided by Substrate, as listed below. +//! 3. Lastly, the final blockchain system created with the above properties needs to be +//! upgradeable. In order to achieve this, Substrate is designed as a meta-protocol, whereby the +//! application logic of the blockchain (called "Runtime") is encoded as a Wasm blob, and is +//! stored onchain. The rest of the system (called "Client") acts as the executor of the Wasm +//! blob. +//! +//! In essence, the meta-protocol of all Substrate based chains is the "Runtime as Wasm blob" +//! accord. This enables the Runtime to become inherently upgradeable (without forks). The upgrade +//! is merely a matter of the Wasm blob being changed in the chain state, which is, in principle, +//! the same as updating an account's balance. +//! +//! To learn more about the substrate architecture using some visuals, see [`substrate_diagram`]. +//! +//! `FRAME`, Substrate's default runtime development library takes the above even further by +//! embracing a declarative programming model whereby correctness is enhanced and the system is +//! highly configurable through parameterization. +//! +//! All in all, this design enables all substrate-based chains to achieve forkless, self-enacting +//! upgrades out of the box. Combined with governance abilities that are shipped with `FRAME`, this +//! enables a chain to survive the test of time. +//! +//! ## How to Get Started +//! +//! Most developers want to leave the client side code as-is, and focus on the runtime. To do so, +//! look into the [`frame_support`] crate, which is the entry point crate into runtime development +//! with FRAME. 
+//! +//! > Side note, it is entirely possible to craft a substrate-based runtime without FRAME, an +//! > example of which can be found [here](https://github.com/JoshOrndorff/frameless-node-template). +//! +//! In more broad terms, the following avenues exist into developing with substrate: +//! +//! * **Templates**: A number of substrate-based templates exist and they can be used for various +//! purposes, with zero to little additional code needed. All of these templates contain runtimes +//! that are highly configurable and are likely suitable for basic needs. +//! * `FRAME`: If needed, one can customize that runtime even further, by using `FRAME` and developing +//! custom modules. +//! * **Core**: To the contrary, some developers may want to customize the client side software to +//! achieve novel goals such as a new consensus engine, or a new database backend. While +//! Substrate's main configurability is in the runtime, the client is also highly generic and can +//! be customized to a great extent. +//! +//! ## Structure +//! +//! Substrate is a massive cargo workspace with hundreds of crates, therefore it is useful to know +//! how to navigate its crates. +//! +//! In broad terms, it is divided into three categories: +//! +//! * `sc-*` (short for *substrate-client*) crates, located under `./client` folder. These are all +//! the client crates. Notable examples are crates such as [`sc-network`], various consensus +//! crates, [`sc-rpc-api`] and [`sc-client-db`], all of which are expected to reside in the client +//! side. +//! * `sp-*` (short for *substrate-primitives*) crates, located under `./primitives` folder. These +//! are the traits that glue the client and runtime together, but are not opinionated about what +//! framework is used for building the runtime. Notable examples are [`sp-api`] and [`sp-io`], +//! which form the communication bridge between the client and runtime, as explained in +//! [`substrate_diagram`]. +//! 
* `pallet-*` and `frame-*` crates, located under `./frame` folder. These are the crates related +//! to FRAME. See [`frame_support`] for more information. +//! +//! ### Wasm Build +//! +//! Many of the Substrate crates, such as entire `sp-*`, need to compile to both Wasm (when a Wasm +//! runtime is being generated) and native (for example, when testing). To achieve this, Substrate +//! follows the convention of the Rust community, and uses a `feature = "std"` to signify that a +//! crate is being built with the standard library, and is built for native. Otherwise, it is built +//! for `no_std`. +//! +//! This can be summarized in `#![cfg_attr(not(feature = "std"), no_std)]`, which you can often find +//! in any Substrate-based runtime. +//! +//! Substrate-based runtimes use [`substrate-wasm-builder`] in their `build.rs` to automatically +//! build their Wasm files as a part of normal build commands. Once built, the wasm file is placed in +//! `./target/{debug|release}/wbuild/{runtime_name}.wasm`. +//! +//! ### Binaries +//! +//! Multiple binaries are shipped with substrate, the most important of which are located in the +//! `./bin` folder. +//! +//! * [`node`] is an extensive substrate node that contains the superset of all runtime and client +//! side features. The corresponding runtime, called [`kitchensink_runtime`] contains all of the +//! modules that are provided with `FRAME`. This node and runtime is only used for testing and +//! demonstration. +//! * [`chain-spec-builder`]: Utility to build more detailed chain-specs for the aforementioned +//! node. Other projects typically contain a `build-spec` subcommand that does the same. +//! * [`node-template`]: a template node that contains a minimal set of features and can act as a +//! starting point of a project. +//! * [`subkey`]: Substrate's key management utility. +//! +//! ### Anatomy of a Binary Crate +//! +//! 
From the above, [`node`] and [`node-template`] are essentially blueprints of a substrate-based +//! project, as the name of the latter is implying. Each substrate-based project typically contains +//! the following: +//! +//! * Under `./runtime`, a `./runtime/src/lib.rs` which is the top level runtime amalgamator file. +//! This file typically contains the [`frame_support::construct_runtime`] macro, which is the +//! final definition of a runtime. +//! +//! * Under `./node`, a `main.rs`, which is the entry point, and a `./service.rs`, which contains all the +//! client side components. Skimming this file yields an overview of the networking, database, +//! consensus and similar client side components. +//! +//! > The above two are conventions, not rules. +//! +//! ## Parachain? +//! +//! As noted above, Substrate is the main engine behind the Polkadot ecosystem. One of the ways +//! through which Polkadot can be utilized is by building "parachains", blockchains that are +//! connected to Polkadot's shared security. +//! +//! To build a parachain, one could use [`Cumulus`](https://github.com/paritytech/cumulus/), the +//! library on top of Substrate, empowering any substrate-based chain to be a Polkadot parachain. +//! +//! ## Where To Go Next? +//! +//! Additional noteworthy crates within substrate: +//! +//! - RPC APIs of a Substrate node: [`sc-rpc-api`]/[`sc-rpc`] +//! - CLI Options of a Substrate node: [`sc-cli`] +//! - All of the consensus related crates provided by Substrate: +//! - [`sc-consensus-aura`] +//! - [`sc-consensus-babe`] +//! - [`sc-consensus-grandpa`] +//! - [`sc-consensus-beefy`] +//! - [`sc-consensus-manual-seal`] +//! - [`sc-consensus-pow`] +//! +//! Additional noteworthy external resources: +//! +//! - [Substrate Developer Hub](https://substrate.dev) +//! - [Parity Tech's Documentation Hub](https://paritytech.github.io/) +//! - [Frontier: Substrate's Ethereum Compatibility Library](https://paritytech.github.io/frontier/) +//! 
- [Polkadot Wiki](https://wiki.polkadot.network/en/) +//! +//! Notable upstream crates: +//! +//! - [`parity-scale-codec`](https://github.com/paritytech/parity-scale-codec) +//! - [`parity-db`](https://github.com/paritytech/parity-db) +//! - [`trie`](https://github.com/paritytech/trie) +//! - [`parity-common`](https://github.com/paritytech/parity-common) +//! +//! Templates: +//! +//! - classic [`substrate-node-template`](https://github.com/substrate-developer-hub/substrate-node-template) +//! - classic [cumulus-parachain-template](https://github.com/substrate-developer-hub/substrate-parachain-template) +//! - [`extended-parachain-template`](https://github.com/paritytech/extended-parachain-template) +//! - [`frontier-parachain-template`](https://github.com/paritytech/frontier-parachain-template) +//! +//! [polkadot]: +//! https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white +//! [github]: +//! https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [`sp-io`]: ../sp_io/index.html +//! [`sp-api`]: ../sp_api/index.html +//! [`sp-api`]: ../sp_api/index.html +//! [`sc-client-db`]: ../sc_client_db/index.html +//! [`sc-network`]: ../sc_network/index.html +//! [`sc-rpc-api`]: ../sc_rpc_api/index.html +//! [`sc-rpc`]: ../sc_rpc/index.html +//! [`sc-cli`]: ../sc_cli/index.html +//! [`sc-consensus-aura`]: ../sc_consensus_aura/index.html +//! [`sc-consensus-babe`]: ../sc_consensus_babe/index.html +//! [`sc-consensus-grandpa`]: ../sc_consensus_grandpa/index.html +//! [`sc-consensus-beefy`]: ../sc_consensus_beefy/index.html +//! [`sc-consensus-manual-seal`]: ../sc_consensus_manual_seal/index.html +//! [`sc-consensus-pow`]: ../sc_consensus_pow/index.html +//! [`node`]: ../node_cli/index.html +//! [`node-template`]: ../node_template/index.html +//! [`kitchensink_runtime`]: ../kitchensink_runtime/index.html +//! [`subkey`]: ../subkey/index.html +//! 
[`chain-spec-builder`]: ../chain_spec_builder/index.html +//! [`substrate-wasm-builder`]: https://crates.io/crates/substrate-wasm-builder + +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(rustdoc::private_intra_doc_links)] + +#[cfg_attr(doc, aquamarine::aquamarine)] +/// In this module, we explore substrate at a more depth. First, let's establish substrate being +/// divided into a client and runtime. +/// +/// ```mermaid +/// graph TB +/// subgraph Substrate +/// direction LR +/// subgraph Client +/// end +/// subgraph Runtime +/// end +/// end +/// ``` +/// +/// The client and the runtime of course need to communicate. This is done through two concepts: +/// +/// 1. Host functions: a way for the (Wasm) runtime to talk to the client. All host functions are +/// defined in [`sp-io`]. For example, [`sp-io::storage`] are the set of host functions that +/// allow the runtime to read and write data to the on-chain state. +/// 2. Runtime APIs: a way for the client to talk to the Wasm runtime. Runtime APIs are defined +/// using macros and utilities in [`sp-api`]. For example, [`sp-api::Core`] is the most basic +/// runtime API that any blockchain must implement in order to be able to (re) execute blocks. 
+/// +/// ```mermaid +/// graph TB +/// subgraph Substrate +/// direction LR +/// subgraph Client +/// end +/// subgraph Runtime +/// end +/// Client --runtime-api--> Runtime +/// Runtime --host-functions--> Client +/// end +/// ``` +/// +/// Finally, let's expand the diagram a bit further and look at the internals of each component: +/// +/// ```mermaid +/// graph TB +/// subgraph Substrate +/// direction LR +/// subgraph Client +/// Database +/// Networking +/// Consensus +/// end +/// subgraph Runtime +/// subgraph FRAME +/// direction LR +/// Governance +/// Currency +/// Staking +/// Identity +/// end +/// end +/// Client --runtime-api--> Runtime +/// Runtime --host-functions--> Client +/// end +/// ``` +/// +/// As noted the runtime contains all of the application specific logic of the blockchain. This is +/// usually written with `FRAME`. The client, on the other hand, contains reusable and generic +/// components that are not specific to one single blockchain, such as networking, database, and the +/// consensus engine. 
+/// +/// [`sp-io`]: ../../sp_io/index.html +/// [`sp-api`]: ../../sp_api/index.html +/// [`sp-io::storage`]: ../../sp_io/storage/index.html +/// [`sp-api::Core`]: ../../sp_api/trait.Core.html +pub mod substrate_diagram {} diff --git a/test-utils/cli/Cargo.toml b/test-utils/cli/Cargo.toml index cc05884a6ebcd..314fe7ad568f2 100644 --- a/test-utils/cli/Cargo.toml +++ b/test-utils/cli/Cargo.toml @@ -14,10 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] substrate-rpc-client = { path = "../../utils/frame/rpc/client" } +sp-rpc = { version = "6.0.0", path = "../../primitives/rpc" } assert_cmd = "2.0.10" nix = "0.26.2" regex = "1.7.3" -tempfile = "3.5.0" tokio = { version = "1.22.0", features = ["full"] } node-primitives = { path = "../../bin/node/primitives" } +node-cli = { path = "../../bin/node/cli" } +sc-cli = { path = "../../client/cli" } +sc-service = { path = "../../client/service" } futures = "0.3.28" + +[features] +try-runtime = ["node-cli/try-runtime"] diff --git a/test-utils/cli/build.rs b/test-utils/cli/build.rs new file mode 100644 index 0000000000000..a68cb706e8fbd --- /dev/null +++ b/test-utils/cli/build.rs @@ -0,0 +1,25 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::env; + +fn main() { + if let Ok(profile) = env::var("PROFILE") { + println!("cargo:rustc-cfg=build_type=\"{}\"", profile); + } +} diff --git a/test-utils/cli/src/lib.rs b/test-utils/cli/src/lib.rs index 526bc1f3776e1..99119a44d2e98 100644 --- a/test-utils/cli/src/lib.rs +++ b/test-utils/cli/src/lib.rs @@ -25,8 +25,8 @@ use nix::{ }; use node_primitives::{Hash, Header}; use regex::Regex; +use sp_rpc::{list::ListOrValue, number::NumberOrHex}; use std::{ - env, io::{BufRead, BufReader, Read}, ops::{Deref, DerefMut}, path::{Path, PathBuf}, @@ -35,6 +35,38 @@ use std::{ }; use tokio::io::{AsyncBufReadExt, AsyncRead}; +/// Similar to [`crate::start_node`] spawns a node, but works in environments where the substrate +/// binary is not accessible with `cargo_bin("substrate-node")`, and allows customising the args +/// passed in. +/// +/// Helpful if you need a Substrate dev node running in the background of a project external to +/// `substrate`. +/// +/// The downside compared to using [`crate::start_node`] is that this method is blocking rather than +/// returning a [`Child`]. Therefore, you may want to call this method inside a new thread. +/// +/// # Example +/// ```ignore +/// // Spawn a dev node. +/// let _ = std::thread::spawn(move || { +/// match common::start_node_inline(vec!["--dev", "--rpc-port=12345"]) { +/// Ok(_) => {} +/// Err(e) => { +/// panic!("Node exited with error: {}", e); +/// } +/// } +/// }); +/// ``` +pub fn start_node_inline(args: Vec<&str>) -> Result<(), sc_service::error::Error> { + use sc_cli::SubstrateCli; + + // Prepend the args with some dummy value because the first arg is skipped. + let cli_call = std::iter::once("node-template").chain(args); + let cli = node_cli::Cli::from_iter(cli_call); + let runner = cli.create_runner(&cli.run).unwrap(); + runner.run_node_until_exit(|config| async move { node_cli::service::new_full(config, cli) }) +} + /// Starts a new Substrate node in development mode with a temporary chain. 
/// /// This function creates a new Substrate node using the `substrate` binary. @@ -61,7 +93,7 @@ use tokio::io::{AsyncBufReadExt, AsyncRead}; /// /// [`Child`]: std::process::Child pub fn start_node() -> Child { - Command::new(cargo_bin("substrate")) + Command::new(cargo_bin("substrate-node")) .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) .args(&["--dev", "--tmp", "--rpc-port=45789", "--no-hardware-benchmarks"]) @@ -98,15 +130,19 @@ pub fn start_node() -> Child { /// build_substrate(&["--features=try-runtime"]); /// ``` pub fn build_substrate(args: &[&str]) { + let is_release_build = !cfg!(build_type = "debug"); + // Get the root workspace directory from the CARGO_MANIFEST_DIR environment variable - let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set"); - let root_dir = std::path::Path::new(&manifest_dir) - .parent() - .expect("Failed to find root workspace directory"); - let output = Command::new("cargo") - .arg("build") + let mut cmd = Command::new("cargo"); + + cmd.arg("build").arg("-p=node-cli"); + + if is_release_build { + cmd.arg("--release"); + } + + let output = cmd .args(args) - .current_dir(root_dir) .output() .expect(format!("Failed to execute 'cargo b' with args {:?}'", args).as_str()); @@ -177,7 +213,8 @@ pub async fn wait_n_finalized_blocks(n: usize, url: &str) { use substrate_rpc_client::{ws_client, ChainApi}; let mut built_blocks = std::collections::HashSet::new(); - let mut interval = tokio::time::interval(Duration::from_secs(2)); + let block_duration = Duration::from_secs(2); + let mut interval = tokio::time::interval(block_duration); let rpc = ws_client(url).await.unwrap(); loop { @@ -194,7 +231,7 @@ pub async fn wait_n_finalized_blocks(n: usize, url: &str) { /// Run the node for a while (3 blocks) pub async fn run_node_for_a_while(base_path: &Path, args: &[&str]) { run_with_timeout(Duration::from_secs(60 * 10), async move { - let mut cmd = Command::new(cargo_bin("substrate")) + let mut cmd = 
Command::new(cargo_bin("substrate-node")) .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) .args(args) @@ -220,6 +257,25 @@ pub async fn run_node_for_a_while(base_path: &Path, args: &[&str]) { .await } +pub async fn block_hash(block_number: u64, url: &str) -> Result { + use substrate_rpc_client::{ws_client, ChainApi}; + + let rpc = ws_client(url).await.unwrap(); + + let result = ChainApi::<(), Hash, Header, ()>::block_hash( + &rpc, + Some(ListOrValue::Value(NumberOrHex::Number(block_number))), + ) + .await + .map_err(|_| "Couldn't get block hash".to_string())?; + + match result { + ListOrValue::Value(maybe_block_hash) if maybe_block_hash.is_some() => + Ok(maybe_block_hash.unwrap()), + _ => Err("Couldn't get block hash".to_string()), + } +} + pub struct KillChildOnDrop(pub Child); impl KillChildOnDrop { diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 682c868f290d2..d921a879fba2e 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -13,11 +13,11 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" +array-bytes = "6.1" async-trait = "0.1.57" -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" -serde = "1.0.136" +serde = "1.0.163" serde_json = "1.0.85" sc-client-api = { version = "4.0.0-dev", path = "../../client/api" } sc-client-db = { version = "0.10.0-dev", default-features = false, features = [ @@ -31,8 +31,8 @@ sc-service = { version = "0.10.0-dev", default-features = false, features = [ ], path = "../../client/service" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-keyring = { version = "7.0.0", path = "../../primitives/keyring" } -sp-keystore = { version = "0.13.0", path 
= "../../primitives/keystore" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } -sp-state-machine = { version = "0.13.0", path = "../../primitives/state-machine" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-keyring = { version = "24.0.0", path = "../../primitives/keyring" } +sp-keystore = { version = "0.27.0", path = "../../primitives/keystore" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } +sp-state-machine = { version = "0.28.0", path = "../../primitives/state-machine" } diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index a258faa5e03e3..8efa7b5f07f8d 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -87,10 +87,9 @@ where /// This implementation is required, because of the weird api requirements around `BlockImport`. #[async_trait::async_trait] -impl ClientBlockImportExt for std::sync::Arc +impl ClientBlockImportExt for std::sync::Arc where - for<'r> &'r T: BlockImport, - Transaction: Send + 'static, + for<'r> &'r T: BlockImport, T: Send + Sync, { async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { @@ -153,7 +152,6 @@ where RA: Send, B: Send + Sync, E: Send, - >::Transaction: Send, { async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index fc9ba1c9e0dd9..90e15e0f8d53e 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -22,10 +22,7 @@ pub mod client_ext; pub use self::client_ext::{ClientBlockImportExt, ClientExt}; -pub use sc_client_api::{ - execution_extensions::{ExecutionExtensions, ExecutionStrategies}, - BadBlocks, ForkBlocks, -}; +pub use sc_client_api::{execution_extensions::ExecutionExtensions, BadBlocks, ForkBlocks}; pub use sc_client_db::{self, Backend, 
BlocksPruning}; pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod, WasmExecutor}; pub use sc_service::{client, RpcHandlers}; @@ -35,14 +32,17 @@ pub use sp_keyring::{ }; pub use sp_keystore::{Keystore, KeystorePtr}; pub use sp_runtime::{Storage, StorageChild}; -pub use sp_state_machine::ExecutionStrategy; use futures::{future::Future, stream::StreamExt}; use sc_client_api::BlockchainEvents; use sc_service::client::{ClientConfig, LocalCallExecutor}; use serde::Deserialize; use sp_core::{storage::ChildInfo, testing::TaskExecutor}; -use sp_runtime::{codec::Encode, traits::Block as BlockT, OpaqueExtrinsic}; +use sp_runtime::{ + codec::Encode, + traits::{Block as BlockT, Header}, + OpaqueExtrinsic, +}; use std::{ collections::{HashMap, HashSet}, pin::Pin, @@ -63,14 +63,12 @@ impl GenesisInit for () { /// A builder for creating a test client instance. pub struct TestClientBuilder { - execution_strategies: ExecutionStrategies, genesis_init: G, /// The key is an unprefixed storage key, this only contains /// default child trie content. child_storage_extension: HashMap, StorageChild>, backend: Arc, _executor: std::marker::PhantomData, - keystore: Option, fork_blocks: ForkBlocks, bad_blocks: BadBlocks, enable_offchain_indexing_api: bool, @@ -115,11 +113,9 @@ impl pub fn with_backend(backend: Arc) -> Self { TestClientBuilder { backend, - execution_strategies: ExecutionStrategies::default(), child_storage_extension: Default::default(), genesis_init: Default::default(), _executor: Default::default(), - keystore: None, fork_blocks: None, bad_blocks: None, enable_offchain_indexing_api: false, @@ -127,12 +123,6 @@ impl } } - /// Set the keystore that should be used by the externalities. - pub fn set_keystore(mut self, keystore: KeystorePtr) -> Self { - self.keystore = Some(keystore); - self - } - /// Alter the genesis storage parameters. 
pub fn genesis_init_mut(&mut self) -> &mut G { &mut self.genesis_init @@ -158,18 +148,6 @@ impl self } - /// Set the execution strategy that should be used by all contexts. - pub fn set_execution_strategy(mut self, execution_strategy: ExecutionStrategy) -> Self { - self.execution_strategies = ExecutionStrategies { - syncing: execution_strategy, - importing: execution_strategy, - block_construction: execution_strategy, - offchain_worker: execution_strategy, - other: execution_strategy, - }; - self - } - /// Sets custom block rules. pub fn set_block_rules( mut self, @@ -292,12 +270,7 @@ impl self.backend.clone(), executor.clone(), Default::default(), - ExecutionExtensions::new( - self.execution_strategies.clone(), - self.keystore.clone(), - sc_offchain::OffchainDb::factory_from_backend(&*self.backend), - Arc::new(executor), - ), + ExecutionExtensions::new(None, Arc::new(executor)), ) .expect("Creates LocalCallExecutor"); @@ -410,7 +383,7 @@ where Box::pin(async move { while let Some(notification) = import_notification_stream.next().await { if notification.is_new_best { - blocks.insert(notification.hash); + blocks.insert(*notification.header.number()); if blocks.len() == count { break } diff --git a/test-utils/derive/Cargo.toml b/test-utils/derive/Cargo.toml index f3e38ada2b30d..b5d73b8e99396 100644 --- a/test-utils/derive/Cargo.toml +++ b/test-utils/derive/Cargo.toml @@ -12,8 +12,8 @@ publish = false [dependencies] proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" -quote = "1.0.26" -syn = { version = "2.0.14", features = ["full"] } +quote = "1.0.28" +syn = { version = "2.0.16", features = ["full"] } [lib] proc-macro = true diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 2bcdafed96123..43ab0da98bd6a 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -13,56 +13,53 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-beefy-mmr = { version = "4.0.0-dev", default-features = false, 
path = "../../frame/beefy-mmr" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/aura" } -sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe" } -sp-consensus-beefy = { version = "4.0.0-dev", default-features = false, path = "../../primitives/consensus/beefy" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } +sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/aura", features = ["serde"] } +sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe", features = ["serde"] } +sp-genesis-builder = { version = "0.1.0-dev", default-features = false, path = "../../primitives/genesis-builder" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../primitives/block-builder" } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "7.0.0", optional = true, path = "../../primitives/keyring" } -memory-db = { version = "0.32.0", default-features = false } +sp-keyring = { version = "24.0.0", optional = true, path = "../../primitives/keyring" } sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../primitives/offchain" } -sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } -sp-std 
= { version = "5.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../../primitives/runtime-interface" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../frame/support" } -sp-version = { version = "5.0.0", default-features = false, path = "../../primitives/version" } +sp-version = { version = "22.0.0", default-features = false, path = "../../primitives/version" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime", features = ["serde"] } pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../frame/babe" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../frame/balances" } -pallet-root-testing = { version = "1.0.0-dev", default-features = false, path = "../../frame/root-testing" } -pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../frame/sudo" } frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../frame/executive" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../frame/system" } frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../frame/system/rpc/runtime-api" } pallet-timestamp = 
{ version = "4.0.0-dev", default-features = false, path = "../../frame/timestamp" } -sp-consensus-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../primitives/consensus/grandpa" } -sp-trie = { version = "7.0.0", default-features = false, path = "../../primitives/trie" } +sp-consensus-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../primitives/consensus/grandpa", features = ["serde"] } +sp-trie = { version = "22.0.0", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-pool" } trie-db = { version = "0.27.0", default-features = false } sc-service = { version = "0.10.0-dev", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } -sp-state-machine = { version = "0.13.0", default-features = false, path = "../../primitives/state-machine" } -sp-externalities = { version = "0.13.0", default-features = false, path = "../../primitives/externalities" } -sp-debug-derive = { path = "../../primitives/debug-derive" } +sp-state-machine = { version = "0.28.0", default-features = false, path = "../../primitives/state-machine" } +sp-externalities = { version = "0.19.0", default-features = false, path = "../../primitives/externalities" } # 3rd party array-bytes = { version = "6.1", optional = true } log = { version = "0.4.17", default-features = false } -serde = { version = "1.0.136", optional = true, features = ["derive"] } +serde = { version = "1.0.163", features = ["alloc", "derive"], default-features = false } +serde_json = { version = "1.0.85", default-features = false, features = ["alloc"] } [dev-dependencies] futures = "0.3.21" sc-block-builder = { version = "0.10.0-dev", path = "../../client/block-builder" } sc-executor = { version = "0.10.0-dev", path = "../../client/executor" } +sc-executor-common = { version = "0.10.0-dev", path = "../../client/executor/common" 
} sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } substrate-test-runtime-client = { version = "2.0.0", path = "./client" } -sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../primitives/tracing" } +json-patch = { version = "1.0.0", default-features = false } [build-dependencies] substrate-wasm-builder = { version = "5.0.0-dev", path = "../../utils/wasm-builder", optional = true } @@ -71,39 +68,33 @@ substrate-wasm-builder = { version = "5.0.0-dev", path = "../../utils/wasm-build default = [ "std", ] + std = [ "array-bytes", + "sp-genesis-builder/std", "sp-application-crypto/std", "sp-consensus-aura/std", "sp-consensus-babe/std", - "sp-consensus-beefy/std", "sp-block-builder/std", "codec/std", "scale-info/std", "sp-inherents/std", "sp-keyring", "log/std", - "memory-db/std", "sp-offchain/std", "sp-core/std", - "sp-core/std", "sp-std/std", - "sp-runtime-interface/std", "sp-io/std", "frame-support/std", "sp-version/std", - "serde", "sp-session/std", "sp-api/std", "sp-runtime/std", "sp-externalities/std", "sp-state-machine/std", "pallet-babe/std", - "pallet-beefy-mmr/std", "pallet-timestamp/std", "pallet-balances/std", - "pallet-sudo/std", - "pallet-root-testing/std", "frame-system-rpc-runtime-api/std", "frame-system/std", "sc-service", @@ -112,6 +103,12 @@ std = [ "sp-transaction-pool/std", "trie-db/std", "substrate-wasm-builder", + "frame-executive/std", + "sc-executor/std", + "sp-tracing/std" ] # Special feature to disable logging disable-logging = [ "sp-api/disable-logging" ] + +#Enabling this flag will disable GenesisBuilder API implementation in runtime. 
+disable-genesis-builder = [] diff --git a/test-utils/runtime/build.rs b/test-utils/runtime/build.rs index dd79ce2c5ae84..230606635f7dc 100644 --- a/test-utils/runtime/build.rs +++ b/test-utils/runtime/build.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +const BUILD_NO_GENESIS_BUILDER_SUPPORT_ENV: &str = "BUILD_NO_GENESIS_BUILDER_SUPPORT"; + fn main() { #[cfg(feature = "std")] { @@ -29,6 +31,19 @@ fn main() { .build(); } + #[cfg(feature = "std")] + if std::env::var(BUILD_NO_GENESIS_BUILDER_SUPPORT_ENV).is_ok() { + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .export_heap_base() + .append_to_rust_flags("-Clink-arg=-zstack-size=1048576") + .set_file_name("wasm_binary_no_genesis_builder") + .import_memory() + .enable_feature("disable-genesis-builder") + .build(); + } + println!("cargo:rerun-if-env-changed={}", BUILD_NO_GENESIS_BUILDER_SUPPORT_ENV); + #[cfg(feature = "std")] { substrate_wasm_builder::WasmBuilder::new() diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index 986db0ba60283..8269fb72fe508 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -12,16 +12,14 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2" } futures = "0.3.21" sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" } -sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -sp-core = 
{ version = "7.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } substrate-test-client = { version = "2.0.0", path = "../../client" } substrate-test-runtime = { version = "2.0.0", path = "../../runtime" } diff --git a/test-utils/runtime/client/src/block_builder_ext.rs b/test-utils/runtime/client/src/block_builder_ext.rs index a9b0d49f3543e..78863209e33e9 100644 --- a/test-utils/runtime/client/src/block_builder_ext.rs +++ b/test-utils/runtime/client/src/block_builder_ext.rs @@ -49,11 +49,7 @@ impl<'a, A, B> BlockBuilderExt for sc_block_builder::BlockBuilder<'a, substrate_test_runtime::Block, A, B> where A: ProvideRuntimeApi + 'a, - A::Api: BlockBuilderApi - + ApiExt< - substrate_test_runtime::Block, - StateBackend = backend::StateBackendFor, - >, + A::Api: BlockBuilderApi + ApiExt, B: backend::Backend, { fn push_transfer( diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 39ee6667ff9f0..7428a7de3a096 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -30,8 +30,7 @@ pub use substrate_test_runtime as runtime; pub use self::block_builder_ext::BlockBuilderExt; -use sp_core::storage::{ChildInfo, Storage, StorageChild}; -use substrate_test_client::sc_executor::WasmExecutor; +use sp_core::storage::ChildInfo; use substrate_test_runtime::genesismap::GenesisStorageBuilder; /// A prelude to import in tests. 
diff --git a/test-utils/runtime/src/extrinsic.rs b/test-utils/runtime/src/extrinsic.rs index a6e13226face0..05ffb7db5d5b9 100644 --- a/test-utils/runtime/src/extrinsic.rs +++ b/test-utils/runtime/src/extrinsic.rs @@ -19,7 +19,7 @@ use crate::{ substrate_test_pallet::pallet::Call as PalletCall, AccountId, Balance, BalancesCall, - CheckSubstrateCall, Extrinsic, Index, Pair, RuntimeCall, SignedPayload, TransferData, + CheckSubstrateCall, Extrinsic, Nonce, Pair, RuntimeCall, SignedPayload, TransferData, }; use codec::Encode; use frame_system::{CheckNonce, CheckWeight}; @@ -81,7 +81,7 @@ impl TryFrom<&Extrinsic> for TransferData { pub struct ExtrinsicBuilder { function: RuntimeCall, signer: Option, - nonce: Option, + nonce: Option, } impl ExtrinsicBuilder { @@ -176,7 +176,7 @@ impl ExtrinsicBuilder { } /// Given `nonce` will be set in `Extrinsic` - pub fn nonce(mut self, nonce: Index) -> Self { + pub fn nonce(mut self, nonce: Nonce) -> Self { self.nonce = Some(nonce); self } diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index aa57eb1928fc2..8a4d6dbe4a71a 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -18,7 +18,7 @@ //! Tool for creating the genesis block. use super::{ - currency, substrate_test_pallet, wasm_binary_unwrap, AccountId, Balance, GenesisConfig, + currency, substrate_test_pallet, wasm_binary_unwrap, AccountId, Balance, RuntimeGenesisConfig, }; use codec::Encode; use sc_service::construct_genesis_block; @@ -73,10 +73,10 @@ impl Default for GenesisStorageBuilder { } impl GenesisStorageBuilder { - /// Creates a storage builder for genesis config. `substrage test runtime` `GenesisConfig` is - /// initialized with provided `authorities`, `endowed_accounts` with given balance. Key-pairs - /// from `extra_storage` will be injected into built storage. `HEAP_PAGES` key and value will - /// also be placed into storage. + /// Creates a storage builder for genesis config. 
`substrate test runtime` + /// [`RuntimeGenesisConfig`] is initialized with provided `authorities`, `endowed_accounts` with + /// given balance. Key-value pairs from `extra_storage` will be injected into built storage. + /// `HEAP_PAGES` key and value will also be placed into storage. pub fn new( authorities: Vec, endowed_accounts: Vec, @@ -91,7 +91,7 @@ impl GenesisStorageBuilder { } } - /// Override default wasm code to be placed into GenesisConfig. + /// Override default wasm code to be placed into RuntimeGenesisConfig. pub fn with_wasm_code(mut self, wasm_code: &Option>) -> Self { self.wasm_code = wasm_code.clone(); self @@ -107,8 +107,8 @@ impl GenesisStorageBuilder { self } - /// Builds the `GenesisConfig` and returns its storage. - pub fn build(self) -> Storage { + /// A `RuntimeGenesisConfig` from internal configuration + pub fn genesis_config(&self) -> RuntimeGenesisConfig { let authorities_sr25519: Vec<_> = self .authorities .clone() @@ -116,9 +116,10 @@ impl GenesisStorageBuilder { .map(|id| sr25519::Public::from(id)) .collect(); - let genesis_config = GenesisConfig { + RuntimeGenesisConfig { system: frame_system::GenesisConfig { code: self.wasm_code.clone().unwrap_or(wasm_binary_unwrap().to_vec()), + ..Default::default() }, babe: pallet_babe::GenesisConfig { authorities: authorities_sr25519 @@ -127,21 +128,26 @@ impl GenesisStorageBuilder { .map(|x| (x.into(), 1)) .collect(), epoch_config: Some(crate::TEST_RUNTIME_BABE_EPOCH_CONFIGURATION), + ..Default::default() }, substrate_test: substrate_test_pallet::GenesisConfig { authorities: authorities_sr25519.clone(), + ..Default::default() }, balances: pallet_balances::GenesisConfig { balances: self.balances.clone() }, - }; + } + } - /// Builds the `RuntimeGenesisConfig` and returns its storage. 
+ pub fn build(self) -> Storage { + let mut storage = self + .genesis_config() .build_storage() - .expect("Build storage from substrate-test-runtime GenesisConfig"); + .expect("Build storage from substrate-test-runtime RuntimeGenesisConfig"); - storage.top.insert( - well_known_keys::HEAP_PAGES.into(), - self.heap_pages_override.unwrap_or(16_u64).encode(), - ); + if let Some(heap_pages) = self.heap_pages_override { + storage.top.insert(well_known_keys::HEAP_PAGES.into(), heap_pages.encode()); + } storage.top.extend(self.extra_storage.top.clone()); storage.children_default.extend(self.extra_storage.children_default.clone()); diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index e1d66e86002e7..b116c8556815f 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -26,6 +26,8 @@ pub mod genesismap; pub mod substrate_test_pallet; use codec::{Decode, Encode}; +#[cfg(not(feature = "disable-genesis-builder"))] +use frame_support::genesis_builder_helper::{build_config, create_default_config}; use frame_support::{ construct_runtime, dispatch::DispatchClass, @@ -42,6 +44,8 @@ use frame_system::{ }; use scale_info::TypeInfo; use sp_std::prelude::*; +#[cfg(not(feature = "std"))] +use sp_std::vec; use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; use sp_core::{OpaqueMetadata, RuntimeDebug}; @@ -130,7 +134,7 @@ pub struct TransferData { pub from: AccountId, pub to: AccountId, pub amount: Balance, - pub nonce: Index, + pub nonce: Nonce, } /// The address format for describing accounts. @@ -156,7 +160,7 @@ pub type Hashing = BlakeTwo256; /// The block number type used in this runtime. pub type BlockNumber = u64; /// Index of a transaction. -pub type Index = u64; +pub type Nonce = u64; /// The item of a block digest. pub type DigestItem = sp_runtime::generic::DigestItem; /// The digest of a block. @@ -217,6 +221,8 @@ decl_runtime_apis! 
{ fn do_trace_log(); /// Verify the given signature, public & message bundle. fn verify_ed25519(sig: ed25519::Signature, public: ed25519::Public, message: Vec) -> bool; + /// Write the given `value` under the given `key` into the storage and then optional panic. + fn write_key_value(key: Vec, value: Vec, panic: bool); } } @@ -291,10 +297,7 @@ impl sp_runtime::traits::SignedExtension for CheckSubstrateCall { } construct_runtime!( - pub enum Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = Extrinsic + pub enum Runtime { System: frame_system, Babe: pallet_babe, @@ -346,13 +349,12 @@ impl frame_system::pallet::Config for Runtime { type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = Index; - type BlockNumber = BlockNumber; + type Nonce = Nonce; type Hash = H256; type Hashing = Hashing; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; - type Header = Header; + type Block = Block; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<2400>; type DbWeight = (); @@ -394,7 +396,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = pallet_balances::weights::SubstrateWeight; type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = RuntimeHoldReason; type MaxHolds = ConstU32<1>; } @@ -421,6 +423,7 @@ impl pallet_babe::Config for Runtime { type EquivocationReportSystem = (); type WeightInfo = (); type MaxAuthorities = ConstU32<10>; + type MaxNominators = ConstU32<100>; } /// Adds one to the given input and returns the final result. @@ -528,8 +531,8 @@ impl_runtime_apis! { } } - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Index { + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { System::account_nonce(account) } } @@ -606,6 +609,14 @@ impl_runtime_apis! 
{ fn verify_ed25519(sig: ed25519::Signature, public: ed25519::Public, message: Vec) -> bool { sp_io::crypto::ed25519_verify(&sig, &message, &public) } + + fn write_key_value(key: Vec, value: Vec, panic: bool) { + sp_io::storage::set(&key, &value); + + if panic { + panic!("I'm just following my master"); + } + } } impl sp_consensus_aura::AuraApi for Runtime { @@ -710,6 +721,17 @@ impl_runtime_apis! { None } } + + #[cfg(not(feature = "disable-genesis-builder"))] + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } } fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic) { @@ -794,10 +816,8 @@ fn test_witness(proof: StorageProof, root: crate::Hash) { let db: sp_trie::MemoryDB = proof.into_memory_db(); let backend = sp_state_machine::TrieBackendBuilder::<_, crate::Hashing>::new(db, root).build(); let mut overlay = sp_state_machine::OverlayedChanges::default(); - let mut cache = sp_state_machine::StorageTransactionCache::<_, _>::default(); let mut ext = sp_state_machine::Ext::new( &mut overlay, - &mut cache, &backend, #[cfg(feature = "std")] None, @@ -835,28 +855,31 @@ pub mod storage_key_generator { /// Generate the hashed storage keys from the raw literals. These keys are expected to be be in /// storage with given substrate-test runtime. 
- pub fn generate_expected_storage_hashed_keys() -> Vec { - let literals: Vec<&[u8]> = vec![b":code", b":extrinsic_index", b":heappages"]; + pub fn generate_expected_storage_hashed_keys(custom_heap_pages: bool) -> Vec { + let mut literals: Vec<&[u8]> = vec![b":code", b":extrinsic_index"]; + + if custom_heap_pages { + literals.push(b":heappages"); + } let keys: Vec> = vec![ + vec![b"Babe", b":__STORAGE_VERSION__:"], vec![b"Babe", b"Authorities"], vec![b"Babe", b"EpochConfig"], vec![b"Babe", b"NextAuthorities"], vec![b"Babe", b"SegmentIndex"], - vec![b"Babe", b":__STORAGE_VERSION__:"], vec![b"Balances", b":__STORAGE_VERSION__:"], vec![b"Balances", b"TotalIssuance"], - vec![b"SubstrateTest", b"Authorities"], vec![b"SubstrateTest", b":__STORAGE_VERSION__:"], + vec![b"SubstrateTest", b"Authorities"], + vec![b"System", b":__STORAGE_VERSION__:"], vec![b"System", b"LastRuntimeUpgrade"], vec![b"System", b"ParentHash"], - vec![b"System", b":__STORAGE_VERSION__:"], vec![b"System", b"UpgradedToTripleRefCount"], vec![b"System", b"UpgradedToU32RefCount"], ]; let mut expected_keys = keys.iter().map(concat_hashes).collect::>(); - expected_keys.extend(literals.into_iter().map(hex)); let balances_map_keys = (0..16_usize) @@ -896,9 +919,12 @@ pub mod storage_key_generator { /// that would be generated by `generate_expected_storage_hashed_keys`. This list is provided /// for the debugging convenience only. Value of each hex-string is documented with the literal /// origin. - pub fn get_expected_storage_hashed_keys() -> Vec { - [ - //System|:__STORAGE_VERSION__: + /// + /// `custom_heap_pages`: Should be set to `true` when the state contains the `:heap_pages` key + /// aka when overriding the heap pages to be used by the executor. 
+ pub fn get_expected_storage_hashed_keys(custom_heap_pages: bool) -> Vec<&'static str> { + let mut res = vec![ + //SubstrateTest|:__STORAGE_VERSION__: "00771836bebdd29870ff246d305c578c4e7b9012096b41c4eb3aaf947f6ea429", //SubstrateTest|Authorities "00771836bebdd29870ff246d305c578c5e0621c4869aa60c02be9adcc98a0d1d", @@ -967,20 +993,25 @@ pub mod storage_key_generator { "3a636f6465", // :extrinsic_index "3a65787472696e7369635f696e646578", - // :heappages - "3a686561707061676573", // Balances|:__STORAGE_VERSION__: "c2261276cc9d1f8598ea4b6a74b15c2f4e7b9012096b41c4eb3aaf947f6ea429", // Balances|TotalIssuance "c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80", - ].into_iter().map(String::from).collect::>() + ]; + + if custom_heap_pages { + // :heappages + res.push("3a686561707061676573"); + } + + res } #[test] fn expected_keys_vec_are_matching() { assert_eq!( - storage_key_generator::get_expected_storage_hashed_keys(), - storage_key_generator::generate_expected_storage_hashed_keys(), + storage_key_generator::get_expected_storage_hashed_keys(false), + storage_key_generator::generate_expected_storage_hashed_keys(false), ); } } @@ -991,12 +1022,14 @@ mod tests { use codec::Encode; use frame_support::dispatch::DispatchInfo; use sc_block_builder::BlockBuilderProvider; - use sp_api::ProvideRuntimeApi; + use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_consensus::BlockOrigin; - use sp_core::{storage::well_known_keys::HEAP_PAGES, ExecutionContext}; + use sp_core::{storage::well_known_keys::HEAP_PAGES, traits::CallContext}; use sp_keyring::AccountKeyring; - use sp_runtime::{traits::SignedExtension, transaction_validity::InvalidTransaction}; - use sp_state_machine::ExecutionStrategy; + use sp_runtime::{ + traits::{Hash as _, SignedExtension}, + transaction_validity::{InvalidTransaction, ValidTransaction}, + }; use substrate_test_runtime_client::{ prelude::*, runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, }; @@ -1006,20 +1039,15 @@ mod tests { // 
This tests that the on-chain `HEAP_PAGES` parameter is respected. // Create a client devoting only 8 pages of wasm memory. This gives us ~512k of heap memory. - let mut client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::AlwaysWasm) - .set_heap_pages(8) - .build(); + let mut client = TestClientBuilder::new().set_heap_pages(8).build(); let best_hash = client.chain_info().best_hash; // Try to allocate 1024k of memory on heap. This is going to fail since it is twice larger // than the heap. - let ret = client.runtime_api().vec_with_capacity_with_context( - best_hash, - // Use `BlockImport` to ensure we use the on chain heap pages as configured above. - ExecutionContext::Importing, - 1048576, - ); + let mut runtime_api = client.runtime_api(); + // This is currently required to allocate the 1024k of memory as configured above. + runtime_api.set_call_context(CallContext::Onchain); + let ret = runtime_api.vec_with_capacity(best_hash, 1048576); assert!(ret.is_err()); // Create a block that sets the `:heap_pages` to 32 pages of memory which corresponds to @@ -1041,8 +1069,7 @@ mod tests { #[test] fn test_storage() { - let client = - TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); + let client = TestClientBuilder::new().build(); let runtime_api = client.runtime_api(); let best_hash = client.chain_info().best_hash; @@ -1067,8 +1094,7 @@ mod tests { let backend = sp_state_machine::TrieBackendBuilder::<_, crate::Hashing>::new(db, root).build(); let proof = sp_state_machine::prove_read(backend, vec![b"value3"]).unwrap(); - let client = - TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); + let client = TestClientBuilder::new().build(); let runtime_api = client.runtime_api(); let best_hash = client.chain_info().best_hash; @@ -1095,7 +1121,7 @@ mod tests { .cloned() .map(storage_key_generator::hex) .collect::>(), - storage_key_generator::get_expected_storage_hashed_keys() + 
storage_key_generator::get_expected_storage_hashed_keys(false) ); } @@ -1103,63 +1129,42 @@ mod tests { fn validate_unsigned_works() { sp_tracing::try_init_simple(); new_test_ext().execute_with(|| { - assert_eq!( - ::validate_unsigned( - TransactionSource::External, - &substrate_test_pallet::Call::bench_call { transfer: Default::default() }, - ), - InvalidTransaction::Call.into(), - ); - - assert_eq!( - ::validate_unsigned( - TransactionSource::External, - &substrate_test_pallet::Call::include_data { data: vec![] }, - ), - InvalidTransaction::Call.into(), - ); - - assert_eq!( - ::validate_unsigned( - TransactionSource::External, - &substrate_test_pallet::Call::fill_block { ratio: Perbill::from_percent(50) }, - ), - InvalidTransaction::Call.into(), - ); - - assert_eq!( - ::validate_unsigned( - TransactionSource::External, - &substrate_test_pallet::Call::deposit_log_digest_item { - log: DigestItem::Other(vec![]) - }, - ), - Ok(Default::default()), - ); - - assert_eq!( - ::validate_unsigned( - TransactionSource::External, - &substrate_test_pallet::Call::storage_change { key: vec![], value: None }, - ), - Ok(Default::default()), - ); - - assert_eq!( - ::validate_unsigned( - TransactionSource::External, - &substrate_test_pallet::Call::read { count: 0 }, - ), - Ok(Default::default()), - ); + let failing_calls = vec![ + substrate_test_pallet::Call::bench_call { transfer: Default::default() }, + substrate_test_pallet::Call::include_data { data: vec![] }, + substrate_test_pallet::Call::fill_block { ratio: Perbill::from_percent(50) }, + ]; + let succeeding_calls = vec![ + substrate_test_pallet::Call::deposit_log_digest_item { + log: DigestItem::Other(vec![]), + }, + substrate_test_pallet::Call::storage_change { key: vec![], value: None }, + substrate_test_pallet::Call::read { count: 0 }, + substrate_test_pallet::Call::read_and_panic { count: 0 }, + ]; + + for call in failing_calls { + assert_eq!( + ::validate_unsigned( + TransactionSource::External, + &call, + ), + 
InvalidTransaction::Call.into(), + ); + } - assert_eq!( - ::validate_unsigned( - TransactionSource::External, - &substrate_test_pallet::Call::read_and_panic { count: 0 }, - ), - Ok(Default::default()), - ); + for call in succeeding_calls { + assert_eq!( + ::validate_unsigned( + TransactionSource::External, + &call, + ), + Ok(ValidTransaction { + provides: vec![BlakeTwo256::hash_of(&call).encode()], + ..Default::default() + }) + ); + } }); } @@ -1197,4 +1202,249 @@ mod tests { ); }) } + + #[cfg(not(feature = "disable-genesis-builder"))] + mod genesis_builder_tests { + use super::*; + use crate::genesismap::GenesisStorageBuilder; + use sc_executor::{error::Result, WasmExecutor}; + use sc_executor_common::runtime_blob::RuntimeBlob; + use serde_json::json; + use sp_application_crypto::Ss58Codec; + use sp_core::traits::Externalities; + use sp_genesis_builder::Result as BuildResult; + use sp_state_machine::BasicExternalities; + use std::{fs, io::Write}; + use storage_key_generator::hex; + + pub fn executor_call( + ext: &mut dyn Externalities, + method: &str, + data: &[u8], + ) -> Result> { + let executor = WasmExecutor::::builder().build(); + executor.uncached_call( + RuntimeBlob::uncompress_if_needed(wasm_binary_unwrap()).unwrap(), + ext, + true, + method, + data, + ) + } + + #[test] + fn build_minimal_genesis_config_works() { + sp_tracing::try_init_simple(); + let default_minimal_json = r#"{"system":{"code":"0x"},"babe":{"authorities":[],"epochConfig":{"c": [ 3, 10 ],"allowed_slots":"PrimaryAndSecondaryPlainSlots"}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + let mut t = BasicExternalities::new_empty(); + + executor_call(&mut t, "GenesisBuilder_build_config", &default_minimal_json.encode()) + .unwrap(); + + let mut keys = t.into_storages().top.keys().cloned().map(hex).collect::>(); + keys.sort(); + + let mut expected = [ + //SubstrateTest|Authorities + "00771836bebdd29870ff246d305c578c5e0621c4869aa60c02be9adcc98a0d1d", + //Babe|SegmentIndex + 
"1cb6f36e027abb2091cfb5110ab5087f66e8f035c8adbe7f1547b43c51e6f8a4", + //Babe|EpochConfig + "1cb6f36e027abb2091cfb5110ab5087fdc6b171b77304263c292cc3ea5ed31ef", + //System|UpgradedToU32RefCount + "26aa394eea5630e07c48ae0c9558cef75684a022a34dd8bfa2baaf44f172b710", + //System|ParentHash + "26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc", + //System::BlockHash|0 + "26aa394eea5630e07c48ae0c9558cef7a44704b568d21667356a5a050c118746bb1bdbcacd6ac9340000000000000000", + //System|UpgradedToTripleRefCount + "26aa394eea5630e07c48ae0c9558cef7a7fd6c28836b9a28522dc924110cf439", + + // System|LastRuntimeUpgrade + "26aa394eea5630e07c48ae0c9558cef7f9cce9c888469bb1a0dceaa129672ef8", + // :code + "3a636f6465", + // :extrinsic_index + "3a65787472696e7369635f696e646578", + // Balances|TotalIssuance + "c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80", + + // added by on_genesis: + // Balances|:__STORAGE_VERSION__: + "c2261276cc9d1f8598ea4b6a74b15c2f4e7b9012096b41c4eb3aaf947f6ea429", + //System|:__STORAGE_VERSION__: + "26aa394eea5630e07c48ae0c9558cef74e7b9012096b41c4eb3aaf947f6ea429", + //Babe|:__STORAGE_VERSION__: + "1cb6f36e027abb2091cfb5110ab5087f4e7b9012096b41c4eb3aaf947f6ea429", + //SubstrateTest|:__STORAGE_VERSION__: + "00771836bebdd29870ff246d305c578c4e7b9012096b41c4eb3aaf947f6ea429", + ].into_iter().map(String::from).collect::>(); + expected.sort(); + + assert_eq!(keys, expected); + } + + #[test] + fn default_config_as_json_works() { + sp_tracing::try_init_simple(); + let mut t = BasicExternalities::new_empty(); + let r = executor_call(&mut t, "GenesisBuilder_create_default_config", &vec![]).unwrap(); + let r = Vec::::decode(&mut &r[..]).unwrap(); + let json = String::from_utf8(r.into()).expect("returned value is json. 
qed."); + + let expected = r#"{"system":{"code":"0x"},"babe":{"authorities":[],"epochConfig":null},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + assert_eq!(expected.to_string(), json); + } + + #[test] + fn build_config_from_json_works() { + sp_tracing::try_init_simple(); + let j = include_str!("test_json/default_genesis_config.json"); + + let mut t = BasicExternalities::new_empty(); + let r = executor_call(&mut t, "GenesisBuilder_build_config", &j.encode()).unwrap(); + let r = BuildResult::decode(&mut &r[..]); + assert!(r.is_ok()); + + let keys = t.into_storages().top.keys().cloned().map(hex).collect::>(); + assert_eq!(keys, storage_key_generator::get_expected_storage_hashed_keys(false)); + } + + #[test] + fn build_config_from_invalid_json_fails() { + sp_tracing::try_init_simple(); + let j = include_str!("test_json/default_genesis_config_invalid.json"); + let mut t = BasicExternalities::new_empty(); + let r = executor_call(&mut t, "GenesisBuilder_build_config", &j.encode()).unwrap(); + let r = BuildResult::decode(&mut &r[..]).unwrap(); + log::info!("result: {:#?}", r); + assert_eq!(r, Err( + sp_runtime::RuntimeString::Owned( + "Invalid JSON blob: unknown field `renamed_authorities`, expected `authorities` or `epochConfig` at line 6 column 25".to_string(), + )) + ); + } + + #[test] + fn build_config_from_incomplete_json_fails() { + sp_tracing::try_init_simple(); + let j = include_str!("test_json/default_genesis_config_incomplete.json"); + + let mut t = BasicExternalities::new_empty(); + let r = executor_call(&mut t, "GenesisBuilder_build_config", &j.encode()).unwrap(); + let r = + core::result::Result::<(), sp_runtime::RuntimeString>::decode(&mut &r[..]).unwrap(); + assert_eq!( + r, + Err(sp_runtime::RuntimeString::Owned( + "Invalid JSON blob: missing field `authorities` at line 13 column 3" + .to_string() + )) + ); + } + + #[test] + fn write_default_config_to_tmp_file() { + if std::env::var("WRITE_DEFAULT_JSON_FOR_STR_GC").is_ok() { + 
sp_tracing::try_init_simple(); + let mut file = fs::OpenOptions::new() + .create(true) + .write(true) + .open("/tmp/default_genesis_config.json") + .unwrap(); + + let j = serde_json::to_string(&GenesisStorageBuilder::default().genesis_config()) + .unwrap() + .into_bytes(); + file.write_all(&j).unwrap(); + } + } + + #[test] + fn build_genesis_config_with_patch_json_works() { + //this tests shows how to do patching on native side + sp_tracing::try_init_simple(); + + let mut t = BasicExternalities::new_empty(); + let r = executor_call(&mut t, "GenesisBuilder_create_default_config", &vec![]).unwrap(); + let r = Vec::::decode(&mut &r[..]).unwrap(); + let mut default_config: serde_json::Value = + serde_json::from_slice(&r[..]).expect("returned value is json. qed."); + + // Patch default json with some custom values: + let patch = json!({ + "babe": { + "epochConfig": { + "c": [ + 7, + 10 + ], + "allowed_slots": "PrimaryAndSecondaryPlainSlots" + } + }, + "substrateTest": { + "authorities": [ + AccountKeyring::Ferdie.public().to_ss58check(), + AccountKeyring::Alice.public().to_ss58check() + ], + } + }); + + json_patch::merge(&mut default_config, &patch); + + // Build genesis config using custom json: + let mut t = BasicExternalities::new_empty(); + executor_call( + &mut t, + "GenesisBuilder_build_config", + &default_config.to_string().encode(), + ) + .unwrap(); + + // Ensure that custom values are in the genesis storage: + let storage = t.into_storages(); + let get_from_storage = |key: &str| -> Vec { + storage.top.get(&array_bytes::hex2bytes(key).unwrap()).unwrap().clone() + }; + + //SubstrateTest|Authorities + let value: Vec = get_from_storage( + "00771836bebdd29870ff246d305c578c5e0621c4869aa60c02be9adcc98a0d1d", + ); + let authority_key_vec = + Vec::::decode(&mut &value[..]).unwrap(); + assert_eq!(authority_key_vec.len(), 2); + assert_eq!(authority_key_vec[0], sp_keyring::AccountKeyring::Ferdie.public()); + assert_eq!(authority_key_vec[1], 
sp_keyring::AccountKeyring::Alice.public()); + + //Babe|Authorities + let value: Vec = get_from_storage( + "1cb6f36e027abb2091cfb5110ab5087fdc6b171b77304263c292cc3ea5ed31ef", + ); + assert_eq!( + BabeEpochConfiguration::decode(&mut &value[..]).unwrap(), + BabeEpochConfiguration { + c: (7, 10), + allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots + } + ); + + // Ensure that some values are default ones: + // Balances|TotalIssuance + let value: Vec = get_from_storage( + "c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80", + ); + assert_eq!(u64::decode(&mut &value[..]).unwrap(), 0); + + // :code + let value: Vec = get_from_storage("3a636f6465"); + assert!(Vec::::decode(&mut &value[..]).is_err()); + + //System|ParentHash + let value: Vec = get_from_storage( + "26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc", + ); + assert_eq!(H256::decode(&mut &value[..]).unwrap(), [69u8; 32].into()); + } + } } diff --git a/test-utils/runtime/src/substrate_test_pallet.rs b/test-utils/runtime/src/substrate_test_pallet.rs index 40e7af0b43769..ed1ad990472ba 100644 --- a/test-utils/runtime/src/substrate_test_pallet.rs +++ b/test-utils/runtime/src/substrate_test_pallet.rs @@ -23,8 +23,11 @@ use frame_support::{pallet_prelude::*, storage}; use sp_core::sr25519::Public; -use sp_runtime::transaction_validity::{ - InvalidTransaction, TransactionSource, TransactionValidity, ValidTransaction, +use sp_runtime::{ + traits::Hash, + transaction_validity::{ + InvalidTransaction, TransactionSource, TransactionValidity, ValidTransaction, + }, }; use sp_std::prelude::*; @@ -38,11 +41,11 @@ pub mod pallet { use crate::TransferData; use frame_system::pallet_prelude::*; use sp_core::storage::well_known_keys; - use sp_runtime::{transaction_validity::TransactionPriority, Perbill}; + use sp_runtime::{traits::BlakeTwo256, transaction_validity::TransactionPriority, Perbill}; #[pallet::pallet] #[pallet::without_storage_info] - pub struct Pallet(PhantomData); + pub struct 
Pallet(_); #[pallet::config] pub trait Config: frame_system::Config {} @@ -52,13 +55,15 @@ pub mod pallet { pub type Authorities = StorageValue<_, Vec, ValueQuery>; #[pallet::genesis_config] - #[cfg_attr(feature = "std", derive(Default))] - pub struct GenesisConfig { + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { pub authorities: Vec, + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { >::put(self.authorities.clone()); } @@ -225,7 +230,10 @@ pub mod pallet { Call::deposit_log_digest_item { .. } | Call::storage_change { .. } | Call::read { .. } | - Call::read_and_panic { .. } => Ok(Default::default()), + Call::read_and_panic { .. } => Ok(ValidTransaction { + provides: vec![BlakeTwo256::hash_of(&call).encode()], + ..Default::default() + }), _ => Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), } } diff --git a/test-utils/runtime/src/test_json/README.md b/test-utils/runtime/src/test_json/README.md new file mode 100644 index 0000000000000..6d6ae55c34639 --- /dev/null +++ b/test-utils/runtime/src/test_json/README.md @@ -0,0 +1,24 @@ +`default_genesis_config.json` file has been generated by the following code: +``` + use crate::genesismap::GenesisStorageBuilder; + #[test] + fn write_default_config_to_tmp_file() { + let j = json::to_string(&GenesisStorageBuilder::default().genesis_config()).unwrap().into_bytes(); + let mut file = fs::OpenOptions::new() + .create(true) + .write(true) + .open("/tmp/default_genesis_config.json").unwrap(); + file.write_all(&j); + } +``` + +`:code` field has been manually truncated to reduce file size. Test is only +comparing keys, not the values. + +`default_genesis_config_invalid.json` is just a broken copy of +`default_genesis_config.json` with `authorities` field renamed to +`renamed_authorities`. 
+
+
+`default_genesis_config_incomplete.json` is just an incomplete copy of
+`default_genesis_config.json` with `babe::authorities` field removed.
diff --git a/test-utils/runtime/src/test_json/default_genesis_config.json b/test-utils/runtime/src/test_json/default_genesis_config.json
new file mode 100644
index 0000000000000..b0218d417daa5
--- /dev/null
+++ b/test-utils/runtime/src/test_json/default_genesis_config.json
@@ -0,0 +1,115 @@
+{
+  "system": {
+    "code": "0x52"
+  },
+  "babe": {
+    "authorities": [
+      [
+        "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
+        1
+      ],
+      [
+        "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty",
+        1
+      ],
+      [
+        "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y",
+        1
+      ]
+    ],
+    "epochConfig": {
+      "c": [
+        3,
+        10
+      ],
+      "allowed_slots": "PrimaryAndSecondaryPlainSlots"
+    }
+  },
+  "substrateTest": {
+    "authorities": [
+      "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
+      "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty",
+      "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y"
+    ]
+  },
+  "balances": {
+    "balances": [
+      [
+        "5D34dL5prEUaGNQtPPZ3yN5Y6BnkfXunKXXz6fo7ZJbLwRRH",
+        100000000000000000
+      ],
+      [
+        "5GBNeWRhZc2jXu7D55rBimKYDk8PGk8itRYFTPfC8RJLKG5o",
+        100000000000000000
+      ],
+      [
+        "5Dfis6XL8J2P6JHUnUtArnFWndn62SydeP8ee8sG2ky9nfm9",
+        100000000000000000
+      ],
+      [
+        "5F4H97f7nQovyrbiq4ZetaaviNwThSVcFobcA5aGab6167dK",
+        100000000000000000
+      ],
+      [
+        "5DiDShBWa1fQx6gLzpf3SFBhMinCoyvHM1BWjPNsmXS8hkrW",
+        100000000000000000
+      ],
+      [
+        "5EFb84yH9tpcFuiKUcsmdoF7xeeY3ajG1ZLQimxQoFt9HMKR",
+        100000000000000000
+      ],
+      [
+        "5DZLHESsfGrJ5YzT3HuRPXsSNb589xQ4Unubh1mYLodzKdVY",
+        100000000000000000
+      ],
+      [
+        "5GHJzqvG6tXnngCpG7B12qjUvbo5e4e9z8Xjidk3CQZHxTPZ",
+        100000000000000000
+      ],
+      [
+        "5CUnSsgAyLND3bxxnfNhgWXSe9Wn676JzLpGLgyJv858qhoX",
+        100000000000000000
+      ],
+      [
+        "5CVKn7HAZW1Ky4r7Vkgsr7VEW88C2sHgUNDiwHY9Ct2hjU8q",
+        100000000000000000
+      ],
+      [
+        "5H673aukQ4PeDe1U2nuv1bi32xDEziimh3PZz7hDdYUB7TNz",
+        100000000000000000
+      ],
+      [
"5HTe9L15LJryjUAt1jZXZCBPnzbbGnpvFwbjE3NwCWaAqovf", + 100000000000000000 + ], + [ + "5D7LFzGpMwHPyDBavkRbWSKWTtJhCaPPZ379wWLT23bJwXJz", + 100000000000000000 + ], + [ + "5CLepMARnEgtVR1EkUuJVUvKh97gzergpSxUU3yKGx1v6EwC", + 100000000000000000 + ], + [ + "5Chb2UhfvZpmjjEziHbFbotM4quX32ZscRV6QJBt1rUKzz51", + 100000000000000000 + ], + [ + "5HmRp3i3ZZk7xsAvbi8hyXVP6whSMnBJGebVC4FsiZVhx52e", + 100000000000000000 + ], + [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + 100000000000000000 + ], + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 100000000000000000 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 100000000000000000 + ] + ] + } +} diff --git a/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json b/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json new file mode 100644 index 0000000000000..e25730ee11cf0 --- /dev/null +++ b/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json @@ -0,0 +1,101 @@ +{ + "system": { + "code": "0x52" + }, + "babe": { + "epochConfig": { + "c": [ + 3, + 10 + ], + "allowed_slots": "PrimaryAndSecondaryPlainSlots" + } + }, + "substrateTest": { + "authorities": [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" + ] + }, + "balances": { + "balances": [ + [ + "5D34dL5prEUaGNQtPPZ3yN5Y6BnkfXunKXXz6fo7ZJbLwRRH", + 100000000000000000 + ], + [ + "5GBNeWRhZc2jXu7D55rBimKYDk8PGk8itRYFTPfC8RJLKG5o", + 100000000000000000 + ], + [ + "5Dfis6XL8J2P6JHUnUtArnFWndn62SydeP8ee8sG2ky9nfm9", + 100000000000000000 + ], + [ + "5F4H97f7nQovyrbiq4ZetaaviNwThSVcFobcA5aGab6167dK", + 100000000000000000 + ], + [ + "5DiDShBWa1fQx6gLzpf3SFBhMinCoyvHM1BWjPNsmXS8hkrW", + 100000000000000000 + ], + [ + "5EFb84yH9tpcFuiKUcsmdoF7xeeY3ajG1ZLQimxQoFt9HMKR", + 100000000000000000 + ], + [ + "5DZLHESsfGrJ5YzT3HuRPXsSNb589xQ4Unubh1mYLodzKdVY", + 100000000000000000 + ], + [ + 
"5GHJzqvG6tXnngCpG7B12qjUvbo5e4e9z8Xjidk3CQZHxTPZ", + 100000000000000000 + ], + [ + "5CUnSsgAyLND3bxxnfNhgWXSe9Wn676JzLpGLgyJv858qhoX", + 100000000000000000 + ], + [ + "5CVKn7HAZW1Ky4r7Vkgsr7VEW88C2sHgUNDiwHY9Ct2hjU8q", + 100000000000000000 + ], + [ + "5H673aukQ4PeDe1U2nuv1bi32xDEziimh3PZz7hDdYUB7TNz", + 100000000000000000 + ], + [ + "5HTe9L15LJryjUAt1jZXZCBPnzbbGnpvFwbjE3NwCWaAqovf", + 100000000000000000 + ], + [ + "5D7LFzGpMwHPyDBavkRbWSKWTtJhCaPPZ379wWLT23bJwXJz", + 100000000000000000 + ], + [ + "5CLepMARnEgtVR1EkUuJVUvKh97gzergpSxUU3yKGx1v6EwC", + 100000000000000000 + ], + [ + "5Chb2UhfvZpmjjEziHbFbotM4quX32ZscRV6QJBt1rUKzz51", + 100000000000000000 + ], + [ + "5HmRp3i3ZZk7xsAvbi8hyXVP6whSMnBJGebVC4FsiZVhx52e", + 100000000000000000 + ], + [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + 100000000000000000 + ], + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 100000000000000000 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 100000000000000000 + ] + ] + } +} diff --git a/test-utils/runtime/src/test_json/default_genesis_config_invalid.json b/test-utils/runtime/src/test_json/default_genesis_config_invalid.json new file mode 100644 index 0000000000000..00550efaeec9f --- /dev/null +++ b/test-utils/runtime/src/test_json/default_genesis_config_invalid.json @@ -0,0 +1,115 @@ +{ + "system": { + "code": "0x52" + }, + "babe": { + "renamed_authorities": [ + [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + 1 + ], + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 1 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 1 + ] + ], + "epochConfig": { + "c": [ + 3, + 10 + ], + "allowed_slots": "PrimaryAndSecondaryPlainSlots" + } + }, + "substrateTest": { + "authorities": [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" + ] + }, + "balances": { + "balances": [ + [ + 
"5D34dL5prEUaGNQtPPZ3yN5Y6BnkfXunKXXz6fo7ZJbLwRRH", + 100000000000000000 + ], + [ + "5GBNeWRhZc2jXu7D55rBimKYDk8PGk8itRYFTPfC8RJLKG5o", + 100000000000000000 + ], + [ + "5Dfis6XL8J2P6JHUnUtArnFWndn62SydeP8ee8sG2ky9nfm9", + 100000000000000000 + ], + [ + "5F4H97f7nQovyrbiq4ZetaaviNwThSVcFobcA5aGab6167dK", + 100000000000000000 + ], + [ + "5DiDShBWa1fQx6gLzpf3SFBhMinCoyvHM1BWjPNsmXS8hkrW", + 100000000000000000 + ], + [ + "5EFb84yH9tpcFuiKUcsmdoF7xeeY3ajG1ZLQimxQoFt9HMKR", + 100000000000000000 + ], + [ + "5DZLHESsfGrJ5YzT3HuRPXsSNb589xQ4Unubh1mYLodzKdVY", + 100000000000000000 + ], + [ + "5GHJzqvG6tXnngCpG7B12qjUvbo5e4e9z8Xjidk3CQZHxTPZ", + 100000000000000000 + ], + [ + "5CUnSsgAyLND3bxxnfNhgWXSe9Wn676JzLpGLgyJv858qhoX", + 100000000000000000 + ], + [ + "5CVKn7HAZW1Ky4r7Vkgsr7VEW88C2sHgUNDiwHY9Ct2hjU8q", + 100000000000000000 + ], + [ + "5H673aukQ4PeDe1U2nuv1bi32xDEziimh3PZz7hDdYUB7TNz", + 100000000000000000 + ], + [ + "5HTe9L15LJryjUAt1jZXZCBPnzbbGnpvFwbjE3NwCWaAqovf", + 100000000000000000 + ], + [ + "5D7LFzGpMwHPyDBavkRbWSKWTtJhCaPPZ379wWLT23bJwXJz", + 100000000000000000 + ], + [ + "5CLepMARnEgtVR1EkUuJVUvKh97gzergpSxUU3yKGx1v6EwC", + 100000000000000000 + ], + [ + "5Chb2UhfvZpmjjEziHbFbotM4quX32ZscRV6QJBt1rUKzz51", + 100000000000000000 + ], + [ + "5HmRp3i3ZZk7xsAvbi8hyXVP6whSMnBJGebVC4FsiZVhx52e", + 100000000000000000 + ], + [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + 100000000000000000 + ], + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 100000000000000000 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 100000000000000000 + ] + ] + } +} diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index 5ce397474fec6..18efb2c8a0a61 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -12,12 +12,12 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = 
"3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" parking_lot = "0.12.1" thiserror = "1.0" sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } substrate-test-runtime-client = { version = "2.0.0", path = "../client" } diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index 8e28449661650..7b52920044027 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -36,7 +36,7 @@ use sp_runtime::{ use std::collections::{BTreeMap, HashMap, HashSet}; use substrate_test_runtime_client::{ runtime::{ - AccountId, Block, BlockNumber, Extrinsic, ExtrinsicBuilder, Hash, Header, Index, Transfer, + AccountId, Block, BlockNumber, Extrinsic, ExtrinsicBuilder, Hash, Header, Nonce, Transfer, TransferData, }, AccountKeyring::{self, *}, @@ -377,7 +377,7 @@ impl sp_blockchain::HeaderMetadata for TestApi { /// Generate transfer extrinsic with a given nonce. /// /// Part of the test api. 
-pub fn uxt(who: AccountKeyring, nonce: Index) -> Extrinsic { +pub fn uxt(who: AccountKeyring, nonce: Nonce) -> Extrinsic { let dummy = codec::Decode::decode(&mut TrailingZeroInput::zeroes()).unwrap(); let transfer = Transfer { from: who.into(), to: dummy, nonce, amount: 1 }; ExtrinsicBuilder::new_transfer(transfer).build() diff --git a/utils/binary-merkle-tree/Cargo.toml b/utils/binary-merkle-tree/Cargo.toml index a54dc4f2d0497..4b7b9e53ef872 100644 --- a/utils/binary-merkle-tree/Cargo.toml +++ b/utils/binary-merkle-tree/Cargo.toml @@ -9,20 +9,24 @@ description = "A no-std/Substrate compatible library to construct binary merkle homepage = "https://substrate.io" [dependencies] -array-bytes = { version = "4.1", optional = true } +array-bytes = { version = "6.1", optional = true } log = { version = "0.4", default-features = false, optional = true } hash-db = { version = "0.16.0", default-features = false } [dev-dependencies] -array-bytes = "4.1" +array-bytes = "6.1" env_logger = "0.9" -sp-core = { version = "7.0.0", path = "../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } [features] debug = ["array-bytes", "log"] default = ["debug", "std"] std = [ - "log/std", - "hash-db/std" + + "log/std", + + "hash-db/std", + "sp-core/std", + "sp-runtime/std" ] diff --git a/utils/binary-merkle-tree/src/lib.rs b/utils/binary-merkle-tree/src/lib.rs index 43c07cb60a045..0efab9186c25f 100644 --- a/utils/binary-merkle-tree/src/lib.rs +++ b/utils/binary-merkle-tree/src/lib.rs @@ -27,8 +27,7 @@ //! Merkle Tree is constructed from arbitrary-length leaves, that are initially hashed using the //! same hasher as the inner nodes. //! Inner nodes are created by concatenating child hashes and hashing again. The implementation -//! sorts each pair of hashes before every hash operation. 
This makes proof verification more -//! efficient by removing the need to track which side each intermediate hash is concatenated on. +//! does not perform any sorting of the input data (leaves) nor when inner nodes are created. //! //! If the number of leaves is not even, last leaf (hash of) is promoted to the upper layer. #[cfg(not(feature = "std"))] @@ -48,7 +47,7 @@ use hash_db::Hasher; pub fn merkle_root(leaves: I) -> H::Out where H: Hasher, - H::Out: Default + AsRef<[u8]> + PartialOrd, + H::Out: Default + AsRef<[u8]>, I: IntoIterator, I::Item: AsRef<[u8]>, { @@ -59,7 +58,7 @@ where fn merkelize(leaves: I, visitor: &mut V) -> H::Out where H: Hasher, - H::Out: Default + AsRef<[u8]> + PartialOrd, + H::Out: Default + AsRef<[u8]>, V: Visitor, I: Iterator, { @@ -144,7 +143,7 @@ impl Visitor for () { pub fn merkle_proof(leaves: I, leaf_index: usize) -> MerkleProof where H: Hasher, - H::Out: Default + Copy + AsRef<[u8]> + PartialOrd, + H::Out: Default + Copy + AsRef<[u8]>, I: IntoIterator, I::IntoIter: ExactSizeIterator, T: AsRef<[u8]>, @@ -203,7 +202,7 @@ where collect_proof .proof .iter() - .map(|s| array_bytes::bytes2hex("", s.as_ref())) + .map(|s| array_bytes::bytes2hex("", s)) .collect::>() ); @@ -244,7 +243,7 @@ pub fn verify_proof<'a, H, P, L>( ) -> bool where H: Hasher, - H::Out: PartialEq + AsRef<[u8]> + PartialOrd, + H::Out: PartialEq + AsRef<[u8]>, P: IntoIterator, L: Into>, { @@ -259,23 +258,27 @@ where let hash_len = ::LENGTH; let mut combined = vec![0_u8; hash_len * 2]; + let mut position = leaf_index; + let mut width = number_of_leaves; let computed = proof.into_iter().fold(leaf_hash, |a, b| { - if a < b { - combined[..hash_len].copy_from_slice(&a.as_ref()); - combined[hash_len..].copy_from_slice(&b.as_ref()); - } else { + if position % 2 == 1 || position + 1 == width { combined[..hash_len].copy_from_slice(&b.as_ref()); combined[hash_len..].copy_from_slice(&a.as_ref()); + } else { + combined[..hash_len].copy_from_slice(&a.as_ref()); + 
combined[hash_len..].copy_from_slice(&b.as_ref()); } let hash = ::hash(&combined); #[cfg(feature = "debug")] log::debug!( "[verify_proof]: (a, b) {:?}, {:?} => {:?} ({:?}) hash", - array_bytes::bytes2hex("", &a.as_ref()), - array_bytes::bytes2hex("", &b.as_ref()), - array_bytes::bytes2hex("", &hash.as_ref()), - array_bytes::bytes2hex("", &combined.as_ref()) + array_bytes::bytes2hex("", a), + array_bytes::bytes2hex("", b), + array_bytes::bytes2hex("", hash), + array_bytes::bytes2hex("", &combined) ); + position /= 2; + width = ((width - 1) / 2) + 1; hash }); @@ -294,7 +297,7 @@ fn merkelize_row( ) -> Result> where H: Hasher, - H::Out: AsRef<[u8]> + PartialOrd, + H::Out: AsRef<[u8]>, V: Visitor, I: Iterator, { @@ -313,20 +316,15 @@ where #[cfg(feature = "debug")] log::debug!( " {:?}\n {:?}", - a.as_ref().map(|s| array_bytes::bytes2hex("", s.as_ref())), - b.as_ref().map(|s| array_bytes::bytes2hex("", s.as_ref())) + a.as_ref().map(|s| array_bytes::bytes2hex("", s)), + b.as_ref().map(|s| array_bytes::bytes2hex("", s)) ); index += 2; match (a, b) { (Some(a), Some(b)) => { - if a < b { - combined[..hash_len].copy_from_slice(a.as_ref()); - combined[hash_len..].copy_from_slice(b.as_ref()); - } else { - combined[..hash_len].copy_from_slice(b.as_ref()); - combined[hash_len..].copy_from_slice(a.as_ref()); - } + combined[..hash_len].copy_from_slice(a.as_ref()); + combined[hash_len..].copy_from_slice(b.as_ref()); next.push(::hash(&combined)); }, @@ -341,7 +339,7 @@ where #[cfg(feature = "debug")] log::debug!( "[merkelize_row] Next: {:?}", - next.iter().map(|s| array_bytes::bytes2hex("", s.as_ref())).collect::>() + next.iter().map(|s| array_bytes::bytes2hex("", s)).collect::>() ); return Err(next) }, @@ -366,7 +364,7 @@ mod tests { // then assert_eq!( - array_bytes::bytes2hex("", out.as_ref()), + array_bytes::bytes2hex("", out), "0000000000000000000000000000000000000000000000000000000000000000" ); } @@ -375,7 +373,7 @@ mod tests { fn should_generate_single_root() { // given let _ 
= env_logger::try_init(); - let data = vec![array_bytes::hex2array_unchecked::<20>( + let data = vec![array_bytes::hex2array_unchecked::<_, 20>( "E04CC55ebEE1cBCE552f250e85c57B70B2E2625b", )]; @@ -384,7 +382,7 @@ mod tests { // then assert_eq!( - array_bytes::bytes2hex("", out.as_ref()), + array_bytes::bytes2hex("", out), "aeb47a269393297f4b0a3c9c9cfd00c7a4195255274cf39d83dabc2fcc9ff3d7" ); } @@ -394,8 +392,8 @@ mod tests { // given let _ = env_logger::try_init(); let data = vec![ - array_bytes::hex2array_unchecked::<20>("E04CC55ebEE1cBCE552f250e85c57B70B2E2625b"), - array_bytes::hex2array_unchecked::<20>("25451A4de12dcCc2D166922fA938E900fCc4ED24"), + array_bytes::hex2array_unchecked::<_, 20>("E04CC55ebEE1cBCE552f250e85c57B70B2E2625b"), + array_bytes::hex2array_unchecked::<_, 20>("25451A4de12dcCc2D166922fA938E900fCc4ED24"), ]; // when @@ -403,7 +401,7 @@ mod tests { // then assert_eq!( - array_bytes::bytes2hex("", out.as_ref()), + array_bytes::bytes2hex("", out), "697ea2a8fe5b03468548a7a413424a6292ab44a82a6f5cc594c3fa7dda7ce402" ); } @@ -412,19 +410,16 @@ mod tests { fn should_generate_root_complex() { let _ = env_logger::try_init(); let test = |root, data| { - assert_eq!( - array_bytes::bytes2hex("", &merkle_root::(data).as_ref()), - root - ); + assert_eq!(array_bytes::bytes2hex("", &merkle_root::(data)), root); }; test( - "5842148bc6ebeb52af882a317c765fccd3ae80589b21a9b8cbf21abb630e46a7", + "aff1208e69c9e8be9b584b07ebac4e48a1ee9d15ce3afe20b77a4d29e4175aa3", vec!["a", "b", "c"], ); test( - "7b84bec68b13c39798c6c50e9e40a0b268e3c1634db8f4cb97314eb243d4c514", + "b8912f7269068901f231a965adfefbc10f0eedcfa61852b103efd54dac7db3d7", vec!["a", "b", "a"], ); @@ -434,7 +429,7 @@ mod tests { ); test( - "cc50382cfd3c9a617741e9a85efee8752b8feb95a2cbecd6365fb21366ce0c8c", + "fb3b3be94be9e983ba5e094c9c51a7d96a4fa2e5d8e891df00ca89ba05bb1239", vec!["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"], ); } @@ -475,12 +470,12 @@ mod tests { // then assert_eq!( - 
array_bytes::bytes2hex("", &proof0.root.as_ref()), - array_bytes::bytes2hex("", &proof1.root.as_ref()) + array_bytes::bytes2hex("", &proof0.root), + array_bytes::bytes2hex("", &proof1.root) ); assert_eq!( - array_bytes::bytes2hex("", &proof2.root.as_ref()), - array_bytes::bytes2hex("", &proof1.root.as_ref()) + array_bytes::bytes2hex("", &proof2.root), + array_bytes::bytes2hex("", &proof1.root) ); assert!(!verify_proof::( @@ -752,7 +747,7 @@ mod tests { "0xc26B34D375533fFc4c5276282Fa5D660F3d8cbcB", ]; let root: H256 = array_bytes::hex2array_unchecked( - "7b2c6eebec6e85b2e272325a11c31af71df52bc0534d2d4f903e0ced191f022e", + "72b0acd7c302a84f1f6b6cefe0ba7194b7398afb440e1b44a9dbbe270394ca53", ) .into(); @@ -764,10 +759,7 @@ mod tests { for l in 0..data.len() { // when let proof = merkle_proof::(data.clone(), l); - assert_eq!( - array_bytes::bytes2hex("", &proof.root.as_ref()), - array_bytes::bytes2hex("", &root.as_ref()) - ); + assert_eq!(array_bytes::bytes2hex("", &proof.root), array_bytes::bytes2hex("", &root)); assert_eq!(proof.leaf_index, l); assert_eq!(&proof.leaf, &data[l]); @@ -797,17 +789,17 @@ mod tests { ) .into(), array_bytes::hex2array_unchecked( - "1fad92ed8d0504ef6c0231bbbeeda960a40693f297c64e87b582beb92ecfb00f" + "d02609d2bbdb28aa25f58b85afec937d5a4c85d37925bce6d0cf802f9d76ba79" ) .into(), array_bytes::hex2array_unchecked( - "0b84c852cbcf839d562d826fd935e1b37975ccaa419e1def8d219df4b83dcbf4" + "ae3f8991955ed884613b0a5f40295902eea0e0abe5858fc520b72959bc016d4e" ) .into(), ], number_of_leaves: data.len(), leaf_index: data.len() - 1, - leaf: array_bytes::hex2array_unchecked::<20>( + leaf: array_bytes::hex2array_unchecked::<_, 20>( "c26B34D375533fFc4c5276282Fa5D660F3d8cbcB" ) .to_vec(), diff --git a/utils/build-script-utils/Cargo.toml b/utils/build-script-utils/Cargo.toml index 7150b4fa5adcb..35096f282ef0c 100644 --- a/utils/build-script-utils/Cargo.toml +++ b/utils/build-script-utils/Cargo.toml @@ -13,4 +13,3 @@ readme = "README.md" targets = 
["x86_64-unknown-linux-gnu"] [dependencies] -platforms = "2.0" diff --git a/utils/fork-tree/Cargo.toml b/utils/fork-tree/Cargo.toml index c60ef8fd33e82..ece7cac8fd308 100644 --- a/utils/fork-tree/Cargo.toml +++ b/utils/fork-tree/Cargo.toml @@ -14,4 +14,4 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index e3ea5682d9598..b7970613d1a11 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -13,11 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" +array-bytes = "6.1" chrono = "0.4" clap = { version = "4.2.5", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.2.2" } -comfy-table = { version = "6.0.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1" } +comfy-table = { version = "7.0.0", default-features = false } handlebars = "4.2.2" Inflector = "0.11.4" itertools = "0.10.3" @@ -26,7 +26,7 @@ linked-hash-map = "0.5.4" log = "0.4.17" rand = { version = "0.8.4", features = ["small_rng"] } rand_pcg = "0.3.1" -serde = "1.0.136" +serde = "1.0.163" serde_json = "1.0.85" thiserror = "1.0.30" thousands = "0.2.0" @@ -42,19 +42,27 @@ sc-service = { version = "0.10.0-dev", default-features = false, path = "../../. 
sc-sysinfo = { version = "6.0.0-dev", path = "../../../client/sysinfo" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-database = { version = "4.0.0-dev", path = "../../../primitives/database" } -sp-externalities = { version = "0.13.0", path = "../../../primitives/externalities" } +sp-externalities = { version = "0.19.0", path = "../../../primitives/externalities" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.13.0", path = "../../../primitives/state-machine" } -sp-std = { version = "5.0.0", path = "../../../primitives/std" } -sp-storage = { version = "7.0.0", path = "../../../primitives/storage" } -sp-trie = { version = "7.0.0", path = "../../../primitives/trie" } +sp-keystore = { version = "0.27.0", path = "../../../primitives/keystore" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.28.0", path = "../../../primitives/state-machine" } +sp-storage = { version = "13.0.0", path = "../../../primitives/storage" } +sp-trie = { version = "22.0.0", path = "../../../primitives/trie" } +sp-io = { version = "23.0.0", path = "../../../primitives/io" } +sp-wasm-interface = { version = "14.0.0", path = "../../../primitives/wasm-interface" } gethostname = "0.2.3" [features] default = ["rocksdb"] -runtime-benchmarks = ["sc-client-db/runtime-benchmarks"] +runtime-benchmarks = [ + "sc-client-db/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + 
"sc-service/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" +] rocksdb = ["sc-cli/rocksdb", "sc-client-db/rocksdb"] diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs index 960056991a190..c9a7fb1ad33df 100644 --- a/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/utils/frame/benchmarking-cli/src/block/bench.rs @@ -77,7 +77,7 @@ where + UsageProvider + BlockBackend + HeaderBackend, - C::Api: ApiExt + BlockBuilderApi, + C::Api: ApiExt + BlockBuilderApi, { /// Returns a new [`Self`] from the arguments. pub fn new(client: Arc, params: BenchmarkParams) -> Self { diff --git a/utils/frame/benchmarking-cli/src/block/cmd.rs b/utils/frame/benchmarking-cli/src/block/cmd.rs index 0192372fa33a7..ee12c1c5dac33 100644 --- a/utils/frame/benchmarking-cli/src/block/cmd.rs +++ b/utils/frame/benchmarking-cli/src/block/cmd.rs @@ -39,12 +39,12 @@ use super::bench::{Benchmark, BenchmarkParams}; /// did not use more weight than declared which would otherwise be an issue. /// To test this with a dev node, first create one with a temp directory: /// -/// $ substrate --dev -d /tmp/my-dev --execution wasm --wasm-execution compiled +/// $ substrate --dev -d /tmp/my-dev --wasm-execution compiled /// /// And wait some time to let it produce 3 blocks. Then benchmark them with: /// /// $ substrate benchmark-block --from 1 --to 3 --dev -d /tmp/my-dev -/// --execution wasm --wasm-execution compiled --pruning archive +/// --wasm-execution compiled --pruning archive /// /// The output will be similar to this: /// @@ -90,7 +90,7 @@ impl BlockCmd { + StorageProvider + UsageProvider + HeaderBackend, - C::Api: ApiExt + BlockBuilderApi, + C::Api: ApiExt + BlockBuilderApi, { // Put everything in the benchmark type to have the generic types handy. 
Benchmark::new(client, self.params.clone()).run() diff --git a/utils/frame/benchmarking-cli/src/extrinsic/bench.rs b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs index facde14adab59..693b9f99f08e8 100644 --- a/utils/frame/benchmarking-cli/src/extrinsic/bench.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs @@ -76,7 +76,7 @@ where C: BlockBuilderProvider + ProvideRuntimeApi + sp_blockchain::HeaderBackend, - C::Api: ApiExt + BlockBuilderApi, + C::Api: ApiExt + BlockBuilderApi, { /// Create a new [`Self`] from the arguments. pub fn new( diff --git a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs index 1001958fe0d28..4c3a6ed1bcd78 100644 --- a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs @@ -97,7 +97,7 @@ impl ExtrinsicCmd { C: BlockBuilderProvider + ProvideRuntimeApi + sp_blockchain::HeaderBackend, - C::Api: ApiExt + BlockBuilderApi, + C::Api: ApiExt + BlockBuilderApi, { // Short circuit if --list was specified. if self.params.list { diff --git a/utils/frame/benchmarking-cli/src/machine/hardware.rs b/utils/frame/benchmarking-cli/src/machine/hardware.rs index b3cbc0053c3f5..5a4b7c797b6f1 100644 --- a/utils/frame/benchmarking-cli/src/machine/hardware.rs +++ b/utils/frame/benchmarking-cli/src/machine/hardware.rs @@ -25,9 +25,6 @@ lazy_static! { /// /// These values are provided by Parity, however it is possible /// to use your own requirements if you are running a custom chain. - /// - /// The reference hardware is describe here: - /// pub static ref SUBSTRATE_REFERENCE_HARDWARE: Requirements = { let raw = include_bytes!("reference_hardware.json").as_slice(); serde_json::from_slice(raw).expect("Hardcoded data is known good; qed") @@ -37,8 +34,9 @@ lazy_static! { #[cfg(test)] mod tests { use super::*; + use sc_sysinfo::{Metric, Requirement, Requirements, Throughput}; - /// `SUBSTRATE_REFERENCE_HARDWARE` can be en- and decoded. 
+ /// `SUBSTRATE_REFERENCE_HARDWARE` can be decoded. #[test] fn json_static_data() { let raw = serde_json::to_string(&*SUBSTRATE_REFERENCE_HARDWARE).unwrap(); @@ -46,4 +44,25 @@ mod tests { assert_eq!(decoded, SUBSTRATE_REFERENCE_HARDWARE.clone()); } + + /// The hard-coded values are correct. + #[test] + fn json_static_data_is_correct() { + assert_eq!( + *SUBSTRATE_REFERENCE_HARDWARE, + Requirements(vec![ + Requirement { metric: Metric::Blake2256, minimum: Throughput::from_mibs(783.27) }, + Requirement { + metric: Metric::Sr25519Verify, + minimum: Throughput::from_kibs(560.670000128), + }, + Requirement { + metric: Metric::MemCopy, + minimum: Throughput::from_gibs(11.4925205078125003), + }, + Requirement { metric: Metric::DiskSeqWrite, minimum: Throughput::from_mibs(950.0) }, + Requirement { metric: Metric::DiskRndWrite, minimum: Throughput::from_mibs(420.0) }, + ]) + ); + } } diff --git a/utils/frame/benchmarking-cli/src/machine/reference_hardware.json b/utils/frame/benchmarking-cli/src/machine/reference_hardware.json index 2a451d31403f1..c2fb4c7d4a285 100644 --- a/utils/frame/benchmarking-cli/src/machine/reference_hardware.json +++ b/utils/frame/benchmarking-cli/src/machine/reference_hardware.json @@ -1,22 +1,22 @@ [ { "metric": "Blake2256", - "minimum": 1029.0 + "minimum": 783.27 }, { "metric": "Sr25519Verify", - "minimum": 0.650391 + "minimum": 0.547529297 }, { "metric": "MemCopy", - "minimum": 14666.752 + "minimum": 11768.341 }, { "metric": "DiskSeqWrite", - "minimum": 450.0 + "minimum": 950.0 }, { "metric": "DiskRndWrite", - "minimum": 200.0 + "minimum": 420.0 } ] diff --git a/utils/frame/benchmarking-cli/src/overhead/README.md b/utils/frame/benchmarking-cli/src/overhead/README.md index 85bcc7fa36f2d..390bc09e41701 100644 --- a/utils/frame/benchmarking-cli/src/overhead/README.md +++ b/utils/frame/benchmarking-cli/src/overhead/README.md @@ -103,12 +103,12 @@ Writing weights to "extrinsic_weights.rs" The complete command for Polkadot looks like this: ```sh 
-cargo run --profile=production -- benchmark overhead --chain=polkadot-dev --execution=wasm --wasm-execution=compiled --weight-path=runtime/polkadot/constants/src/weights/ +cargo run --profile=production -- benchmark overhead --chain=polkadot-dev --wasm-execution=compiled --weight-path=runtime/polkadot/constants/src/weights/ ``` This will overwrite the the [block_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/block_weights.rs) and [extrinsic_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/extrinsic_weights.rs) files in the Polkadot runtime directory. You can try the same for *Rococo* and to see that the results slightly differ. -👉 It is paramount to use `--profile=production`, `--execution=wasm` and `--wasm-execution=compiled` as the results are otherwise useless. +👉 It is paramount to use `--profile=production` and `--wasm-execution=compiled` as the results are otherwise useless. ## Output Interpretation @@ -122,7 +122,6 @@ Minimizing this is important to have a large transaction throughput. - `--weight-path` Set the output directory or file to write the weights to. - `--repeat` Set the repetitions of both benchmarks. - `--warmup` Set the rounds of warmup before measuring. -- `--execution` Should be set to `wasm` for correct results. - `--wasm-execution` Should be set to `compiled` for correct results. 
- [`--mul`](../shared/README.md#arguments) - [`--add`](../shared/README.md#arguments) diff --git a/utils/frame/benchmarking-cli/src/overhead/cmd.rs b/utils/frame/benchmarking-cli/src/overhead/cmd.rs index 70e64cc2b66ad..5a4c37b1f6f07 100644 --- a/utils/frame/benchmarking-cli/src/overhead/cmd.rs +++ b/utils/frame/benchmarking-cli/src/overhead/cmd.rs @@ -111,7 +111,7 @@ impl OverheadCmd { C: BlockBuilderProvider + ProvideRuntimeApi + sp_blockchain::HeaderBackend, - C::Api: ApiExt + BlockBuilderApi, + C::Api: ApiExt + BlockBuilderApi, { if ext_builder.pallet() != "system" || ext_builder.extrinsic() != "remark" { return Err(format!("The extrinsic builder is required to build `System::Remark` extrinsics but builds `{}` extrinsics instead", ext_builder.name()).into()); diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index 08bcf3ee5a6b5..84da3aaa02c00 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -23,12 +23,10 @@ use frame_benchmarking::{ }; use frame_support::traits::StorageInfo; use linked_hash_map::LinkedHashMap; -use sc_cli::{ - execution_method_from_cli, CliConfiguration, ExecutionStrategy, Result, SharedParams, -}; +use sc_cli::{execution_method_from_cli, CliConfiguration, Result, SharedParams}; use sc_client_db::BenchmarkingState; -use sc_executor::{NativeElseWasmExecutor, WasmExecutor}; -use sc_service::{Configuration, NativeExecutionDispatch}; +use sc_executor::WasmExecutor; +use sc_service::Configuration; use serde::Serialize; use sp_core::{ offchain::{ @@ -143,13 +141,22 @@ not created by a node that was compiled with the flag"; impl PalletCmd { /// Runs the command and benchmarks the chain. 
- pub fn run(&self, config: Configuration) -> Result<()> + pub fn run(&self, config: Configuration) -> Result<()> where BB: BlockT + Debug, <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, - ExecDispatch: NativeExecutionDispatch + 'static, + ExtraHostFunctions: sp_wasm_interface::HostFunctions, { + let _d = self.execution.as_ref().map(|exec| { + // We print the warning at the end, since there is often A LOT of output. + sp_core::defer::DeferGuard::new(move || { + log::warn!( + target: LOG_TARGET, + "⚠️ Argument `--execution` is deprecated. Its value of `{exec}` has no effect.", + ) + }) + }); + if let Some(output_path) = &self.output { if !output_path.is_dir() && output_path.file_name().is_none() { return Err("Output file or path is invalid!".into()) } @@ -183,7 +190,6 @@ impl PalletCmd { } let spec = config.chain_spec; - let strategy = self.execution.unwrap_or(ExecutionStrategy::Native); let pallet = self.pallet.clone().unwrap_or_default(); let pallet = pallet.as_bytes(); let extrinsic = self.extrinsic.clone().unwrap_or_default(); @@ -213,13 +219,15 @@ impl PalletCmd { let method = execution_method_from_cli(self.wasm_method, self.wasmtime_instantiation_strategy); - let executor = NativeElseWasmExecutor::::new_with_wasm_executor( - WasmExecutor::builder() - .with_execution_method(method) - .with_max_runtime_instances(2) - .with_runtime_cache_size(2) - .build(), - ); + let executor = WasmExecutor::<( + sp_io::SubstrateHostFunctions, + frame_benchmarking::benchmarking::HostFunctions, + ExtraHostFunctions, + )>::builder() + .with_execution_method(method) + .with_max_runtime_instances(2) + .with_runtime_cache_size(2) + .build(); let extensions = || -> Extensions { let mut extensions = Extensions::default(); @@ -242,11 +250,11 @@ impl PalletCmd { &executor, "Benchmark_benchmark_metadata", &(self.extra).encode(), - extensions(), + &mut extensions(),
&sp_state_machine::backend::BackendRuntimeCode::new(state).runtime_code()?, CallContext::Offchain, ) - .execute(strategy.into()) + .execute() .map_err(|e| format!("{}: {}", ERROR_METADATA_NOT_FOUND, e))?; let (list, storage_info) = @@ -378,12 +386,12 @@ impl PalletCmd { 1, // no need to do internal repeats ) .encode(), - extensions(), + &mut extensions(), &sp_state_machine::backend::BackendRuntimeCode::new(state) .runtime_code()?, CallContext::Offchain, ) - .execute(strategy.into()) + .execute() .map_err(|e| { format!("Error executing and verifying runtime benchmark: {}", e) })?; @@ -418,12 +426,12 @@ impl PalletCmd { self.repeat, ) .encode(), - extensions(), + &mut extensions(), &sp_state_machine::backend::BackendRuntimeCode::new(state) .runtime_code()?, CallContext::Offchain, ) - .execute(strategy.into()) + .execute() .map_err(|e| format!("Error executing runtime benchmark: {}", e))?; let batch = @@ -450,12 +458,12 @@ impl PalletCmd { self.repeat, ) .encode(), - extensions(), + &mut extensions(), &sp_state_machine::backend::BackendRuntimeCode::new(state) .runtime_code()?, CallContext::Offchain, ) - .execute(strategy.into()) + .execute() .map_err(|e| format!("Error executing runtime benchmark: {}", e))?; let batch = @@ -625,12 +633,6 @@ impl PalletCmd { println!("{}", comment); } println!(); - - println!("-- Proof Sizes --\n"); - for result in batch.db_results.iter() { - println!("{} bytes", result.proof_size); - } - println!(); } // Conduct analysis. 
diff --git a/utils/frame/benchmarking-cli/src/pallet/mod.rs b/utils/frame/benchmarking-cli/src/pallet/mod.rs index f214569051d45..c69ce1765fc9d 100644 --- a/utils/frame/benchmarking-cli/src/pallet/mod.rs +++ b/utils/frame/benchmarking-cli/src/pallet/mod.rs @@ -20,8 +20,8 @@ mod writer; use crate::shared::HostInfoParams; use sc_cli::{ - ExecutionStrategy, WasmExecutionMethod, WasmtimeInstantiationStrategy, - DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, DEFAULT_WASM_EXECUTION_METHOD, + WasmExecutionMethod, WasmtimeInstantiationStrategy, DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, + DEFAULT_WASM_EXECUTION_METHOD, }; use std::{fmt::Debug, path::PathBuf}; @@ -43,7 +43,7 @@ pub struct PalletCmd { pub extrinsic: Option, /// Select how many samples we should take across the variable components. - #[arg(short, long, default_value_t = 2)] + #[arg(short, long, default_value_t = 50)] pub steps: u32, /// Indicates lowest values for each of the component ranges. @@ -55,7 +55,7 @@ pub struct PalletCmd { pub highest_range_values: Vec, /// Select how many repetitions of this benchmark should run from within the wasm. - #[arg(short, long, default_value_t = 1)] + #[arg(short, long, default_value_t = 20)] pub repeat: u32, /// Select how many repetitions of this benchmark should run from the client. @@ -129,10 +129,6 @@ pub struct PalletCmd { #[clap(flatten)] pub shared_params: sc_cli::SharedParams, - /// The execution strategy that should be used for benchmarks. - #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] - pub execution: Option, - /// Method for executing Wasm runtime code. #[arg( long = "wasm-execution", @@ -154,6 +150,10 @@ pub struct PalletCmd { )] pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy, + /// DEPRECATED: This argument has no effect. + #[arg(long = "execution")] + pub execution: Option, + /// Limit the memory the database cache can use. 
#[arg(long = "db-cache", value_name = "MiB", default_value_t = 1024)] pub database_cache_size: u32, @@ -195,4 +195,10 @@ pub struct PalletCmd { /// the analysis is read from this file. #[arg(long)] pub json_input: Option, + + /// Allow overwriting a single file with multiple results. + /// + /// This exists only to restore legacy behaviour. It should never actually be needed. + #[arg(long)] + pub unsafe_overwrite_results: bool, } diff --git a/utils/frame/benchmarking-cli/src/pallet/template.hbs b/utils/frame/benchmarking-cli/src/pallet/template.hbs index 85b0e86caad96..1e5e294acba26 100644 --- a/utils/frame/benchmarking-cli/src/pallet/template.hbs +++ b/utils/frame/benchmarking-cli/src/pallet/template.hbs @@ -5,7 +5,7 @@ //! DATE: {{date}}, STEPS: `{{cmd.steps}}`, REPEAT: `{{cmd.repeat}}`, LOW RANGE: `{{cmd.lowest_range_values}}`, HIGH RANGE: `{{cmd.highest_range_values}}` //! WORST CASE MAP SIZE: `{{cmd.worst_case_map_values}}` //! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}` -//! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}}, CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} +//! WASM-EXECUTION: `{{cmd.wasm_execution}}`, CHAIN: `{{cmd.chain}}`, DB CACHE: {{cmd.db_cache}} // Executed Command: {{#each args as |arg|}} diff --git a/utils/frame/benchmarking-cli/src/pallet/writer.rs b/utils/frame/benchmarking-cli/src/pallet/writer.rs index 89cce45627e68..69c95d13c0985 100644 --- a/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -90,7 +90,6 @@ struct CmdData { repeat: u32, lowest_range_values: Vec, highest_range_values: Vec, - execution: String, wasm_execution: String, chain: String, db_cache: u32, @@ -375,7 +374,7 @@ fn get_benchmark_data( } } -// Create weight file from benchmark data and Handlebars template. +/// Create weight file from benchmark data and Handlebars template. 
pub(crate) fn write_results( batches: &[BenchmarkBatchSplitResults], storage_info: &[StorageInfo], @@ -384,7 +383,7 @@ pub(crate) fn write_results( default_pov_mode: PovEstimationMode, path: &PathBuf, cmd: &PalletCmd, -) -> Result<(), std::io::Error> { +) -> Result<(), sc_cli::Error> { // Use custom template if provided. let template: String = match &cmd.template { Some(template_file) => fs::read_to_string(template_file)?, @@ -425,7 +424,6 @@ pub(crate) fn write_results( repeat: cmd.repeat, lowest_range_values: cmd.lowest_range_values.clone(), highest_range_values: cmd.highest_range_values.clone(), - execution: format!("{:?}", cmd.execution), wasm_execution: cmd.wasm_method.to_string(), chain: format!("{:?}", cmd.shared_params.chain), db_cache: cmd.database_cache_size, @@ -492,10 +490,21 @@ pub(crate) fn write_results( created_files.push(file_path); } - for file in created_files.iter().duplicates() { - // This can happen when there are multiple instances of a pallet deployed - // and `--output` forces the output of all instances into the same file. - println!("Multiple benchmarks were written to the same file: {:?}.", file); + let overwritten_files = created_files.iter().duplicates().collect::>(); + if !overwritten_files.is_empty() { + let msg = format!( + "Multiple results were written to the same file. This can happen when \ + there are multiple instances of a pallet deployed and `--output` forces the output of all \ + instances into the same file. Use `--unsafe-overwrite-results` to ignore this error. 
The \ + affected files are: {:?}", + overwritten_files + ); + + if cmd.unsafe_overwrite_results { + println!("{msg}"); + } else { + return Err(msg.into()) + } } Ok(()) } @@ -655,7 +664,7 @@ pub(crate) fn process_storage_results( match key_info { Some(key_info) => { let comment = format!( - "Storage: {} {} (r:{} w:{})", + "Storage: `{}::{}` (r:{} w:{})", String::from_utf8(key_info.pallet_name.clone()) .expect("encoded from string"), String::from_utf8(key_info.storage_name.clone()) @@ -667,7 +676,7 @@ pub(crate) fn process_storage_results( }, None => { let comment = format!( - "Storage: unknown `0x{}` (r:{} w:{})", + "Storage: UNKNOWN KEY `0x{}` (r:{} w:{})", HexDisplay::from(key), reads, writes, @@ -689,7 +698,7 @@ pub(crate) fn process_storage_results( ) { Some(new_pov) => { let comment = format!( - "Proof: {} {} (max_values: {:?}, max_size: {:?}, added: {}, mode: {:?})", + "Proof: `{}::{}` (`max_values`: {:?}, `max_size`: {:?}, added: {}, mode: `{:?}`)", String::from_utf8(key_info.pallet_name.clone()) .expect("encoded from string"), String::from_utf8(key_info.storage_name.clone()) @@ -707,7 +716,7 @@ pub(crate) fn process_storage_results( let item = String::from_utf8(key_info.storage_name.clone()) .expect("encoded from string"); let comment = format!( - "Proof Skipped: {} {} (max_values: {:?}, max_size: {:?}, mode: {:?})", + "Proof: `{}::{}` (`max_values`: {:?}, `max_size`: {:?}, mode: `{:?}`)", pallet, item, key_info.max_values, key_info.max_size, used_pov_mode, ); @@ -717,7 +726,7 @@ pub(crate) fn process_storage_results( }, None => { let comment = format!( - "Proof Skipped: unknown `0x{}` (r:{} w:{})", + "Proof: UNKNOWN KEY `0x{}` (r:{} w:{})", HexDisplay::from(key), reads, writes, diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index 99cadbe8ec34e..307c9207fdaf9 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -21,7 +21,7 @@ use 
sc_client_db::DbHash; use sc_service::Configuration; use sp_blockchain::HeaderBackend; use sp_database::{ColumnId, Database}; -use sp_runtime::traits::{Block as BlockT, HashFor}; +use sp_runtime::traits::{Block as BlockT, HashingFor}; use sp_state_machine::Storage; use sp_storage::{ChildInfo, ChildType, PrefixedStorageKey, StateVersion}; @@ -126,7 +126,7 @@ impl StorageCmd { cfg: Configuration, client: Arc, db: (Arc>, ColumnId), - storage: Arc>>, + storage: Arc>>, ) -> Result<()> where BA: ClientBackend, diff --git a/utils/frame/benchmarking-cli/src/storage/write.rs b/utils/frame/benchmarking-cli/src/storage/write.rs index faca3b536b22e..4def1909ead5e 100644 --- a/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/utils/frame/benchmarking-cli/src/storage/write.rs @@ -21,7 +21,7 @@ use sc_client_db::{DbHash, DbState, DbStateBuilder}; use sp_api::StateBackend; use sp_blockchain::HeaderBackend; use sp_database::{ColumnId, Transaction}; -use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT}; +use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT}; use sp_trie::PrefixedMemoryDB; use log::{info, trace}; @@ -43,7 +43,7 @@ impl StorageCmd { &self, client: Arc, (db, state_col): (Arc>, ColumnId), - storage: Arc>>, + storage: Arc>>, ) -> Result where Block: BlockT

+ Debug, @@ -164,7 +164,7 @@ impl StorageCmd { /// `invert_inserts` replaces all inserts with removals. fn convert_tx( db: Arc>, - mut tx: PrefixedMemoryDB>, + mut tx: PrefixedMemoryDB>, invert_inserts: bool, col: ColumnId, ) -> Transaction { diff --git a/utils/frame/frame-utilities-cli/Cargo.toml b/utils/frame/frame-utilities-cli/Cargo.toml index 3e736b7f585e9..f2665cde51421 100644 --- a/utils/frame/frame-utilities-cli/Cargo.toml +++ b/utils/frame/frame-utilities-cli/Cargo.toml @@ -15,8 +15,8 @@ clap = { version = "4.2.5", features = ["derive"] } frame-support = { version = "4.0.0-dev", path = "../../../frame/support" } frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } [features] default = [] diff --git a/utils/frame/generate-bags/Cargo.toml b/utils/frame/generate-bags/Cargo.toml index a24af6a2704ae..e8d7d51ead897 100644 --- a/utils/frame/generate-bags/Cargo.toml +++ b/utils/frame/generate-bags/Cargo.toml @@ -14,8 +14,8 @@ frame-support = { version = "4.0.0-dev", path = "../../../frame/support" } frame-election-provider-support = { version = "4.0.0-dev", path = "../../../frame/election-provider-support" } frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } pallet-staking = { version = "4.0.0-dev", path = "../../../frame/staking" } +sp-staking = { version = "4.0.0-dev", path = "../../../primitives/staking" } # third party chrono = { version = "0.4.19" } -git2 = { version = "0.16.0", default-features = false } num-format = "0.4.3" diff --git a/utils/frame/generate-bags/src/lib.rs b/utils/frame/generate-bags/src/lib.rs index 509ae5530eae1..923017261a44b 100644 --- 
a/utils/frame/generate-bags/src/lib.rs +++ b/utils/frame/generate-bags/src/lib.rs @@ -18,8 +18,7 @@ //! Support code to ease the process of generating bag thresholds. //! //! NOTE: this assume the runtime implements [`pallet_staking::Config`], as it requires an -//! implementation of the traits [`frame_support::traits::Currency`] and -//! [`frame_support::traits::CurrencyToVote`]. +//! implementation of the traits [`frame_support::traits::Currency`] and `CurrencyToVote`. //! //! The process of adding bags to a runtime requires only four steps. //! @@ -70,7 +69,7 @@ fn existential_weight( total_issuance: u128, minimum_balance: u128, ) -> VoteWeight { - use frame_support::traits::CurrencyToVote; + use sp_staking::currency_to_vote::CurrencyToVote; T::CurrencyToVote::to_vote( minimum_balance @@ -89,8 +88,11 @@ fn existential_weight( /// Just searches the git working directory root for files matching certain patterns; it's /// pretty naive. fn path_to_header_file() -> Option { - let repo = git2::Repository::open_from_env().ok()?; - let workdir = repo.workdir()?; + let mut workdir: &Path = &std::env::current_dir().ok()?; + while !workdir.join(".git").exists() { + workdir = workdir.parent()?; + } + for file_name in &["HEADER-APACHE2", "HEADER-GPL3", "HEADER", "file_header.txt"] { let path = workdir.join(file_name); if path.exists() { diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index b723c9babe640..75f25bf322f90 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -13,13 +13,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.16.2", features = ["http-client"] } -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } log = "0.4.17" -serde = "1.0.136" -frame-support = { version = "4.0.0-dev", optional = true, path = "../../../frame/support" } -sp-core = { 
version = "7.0.0", path = "../../../primitives/core" } -sp-io = { version = "7.0.0", path = "../../../primitives/io" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +serde = "1.0.163" +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-state-machine = { version = "0.28.0", path = "../../../primitives/state-machine" } +sp-io = { version = "23.0.0", path = "../../../primitives/io" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } substrate-rpc-client = { path = "../rpc/client" } futures = "0.3" @@ -29,9 +29,7 @@ spinners = "4.1.0" tokio-retry = "0.3.0" [dev-dependencies] -frame-support = { version = "4.0.0-dev", path = "../../../frame/support" } -pallet-elections-phragmen = { version = "5.0.0-dev", path = "../../../frame/elections-phragmen" } -tracing-subscriber = { version = "0.3.16", features = ["env-filter"] } +sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } [features] -remote-test = ["frame-support"] +remote-test = [] diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 9a64f4ffddfe3..072ea6ef5e597 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -21,7 +21,7 @@ //! based chain, or a local state snapshot file. 
use async_recursion::async_recursion; -use codec::{Decode, Encode}; +use codec::{Compact, Decode, Encode}; use indicatif::{ProgressBar, ProgressStyle}; use jsonrpsee::{ core::params::ArrayParams, @@ -36,10 +36,12 @@ use sp_core::{ well_known_keys::{is_default_child_storage_key, DEFAULT_CHILD_STORAGE_KEY_PREFIX}, ChildInfo, ChildType, PrefixedStorageKey, StorageData, StorageKey, }, - H256, }; -pub use sp_io::TestExternalities; -use sp_runtime::{traits::Block as BlockT, StateVersion}; +use sp_runtime::{ + traits::{Block as BlockT, HashingFor}, + StateVersion, +}; +use sp_state_machine::TestExternalities; use spinners::{Spinner, Spinners}; use std::{ cmp::max, @@ -54,29 +56,65 @@ use tokio_retry::{strategy::FixedInterval, Retry}; type KeyValue = (StorageKey, StorageData); type TopKeyValues = Vec; type ChildKeyValues = Vec<(ChildInfo, Vec)>; +type SnapshotVersion = Compact; const LOG_TARGET: &str = "remote-ext"; const DEFAULT_HTTP_ENDPOINT: &str = "https://rpc.polkadot.io:443"; +const SNAPSHOT_VERSION: SnapshotVersion = Compact(3); + /// The snapshot that we store on disk. #[derive(Decode, Encode)] struct Snapshot { + snapshot_version: SnapshotVersion, state_version: StateVersion, block_hash: B::Hash, - raw_storage: Vec<(H256, Vec)>, - storage_root: H256, + // > + raw_storage: Vec<(Vec, (Vec, i32))>, + storage_root: B::Hash, +} + +impl Snapshot { + pub fn new( + state_version: StateVersion, + block_hash: B::Hash, + raw_storage: Vec<(Vec, (Vec, i32))>, + storage_root: B::Hash, + ) -> Self { + Self { + snapshot_version: SNAPSHOT_VERSION, + state_version, + block_hash, + raw_storage, + storage_root, + } + } + + fn load(path: &PathBuf) -> Result, &'static str> { + let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; + // The first item in the SCALE encoded struct bytes is the snapshot version. We decode and + // check that first, before proceeding to decode the rest of the snapshot. 
+ let snapshot_version = SnapshotVersion::decode(&mut &*bytes) + .map_err(|_| "Failed to decode snapshot version")?; + + if snapshot_version != SNAPSHOT_VERSION { + return Err("Unsupported snapshot version detected. Please create a new snapshot.") + } + + Decode::decode(&mut &*bytes).map_err(|_| "Decode failed") + } } /// An externalities that acts exactly the same as [`sp_io::TestExternalities`] but has a few extra /// bits and pieces to it, and can be loaded remotely. pub struct RemoteExternalities { /// The inner externalities. - pub inner_ext: TestExternalities, + pub inner_ext: TestExternalities>, /// The block hash it which we created this externality env. pub block_hash: B::Hash, } impl Deref for RemoteExternalities { - type Target = TestExternalities; + type Target = TestExternalities>; fn deref(&self) -> &Self::Target { &self.inner_ext } @@ -276,8 +314,6 @@ pub struct Builder { overwrite_state_version: Option, } -// NOTE: ideally we would use `DefaultNoBound` here, but not worth bringing in frame-support for -// that. impl Default for Builder { fn default() -> Self { Self { @@ -533,7 +569,7 @@ where &self, prefix: StorageKey, at: B::Hash, - pending_ext: &mut TestExternalities, + pending_ext: &mut TestExternalities>, ) -> Result, &'static str> { let start = Instant::now(); let mut sp = Spinner::with_timer(Spinners::Dots, "Scraping keys...".into()); @@ -541,7 +577,6 @@ where .rpc_get_keys_paged(prefix.clone(), at) .await? 
.into_iter() - .filter(|k| !is_default_child_storage_key(&k.0)) .collect::>(); sp.stop_with_message(format!( "✅ Found {} keys ({:.2}s)", @@ -609,7 +644,14 @@ where let mut sp = Spinner::with_timer(Spinners::Dots, "Inserting keys into DB...".into()); let start = Instant::now(); - pending_ext.batch_insert(key_values.clone().into_iter().map(|(k, v)| (k.0, v.0))); + pending_ext.batch_insert(key_values.clone().into_iter().filter_map(|(k, v)| { + // Don't insert the child keys here, they need to be inserted separately with all their + // data in the load_child_remote function. + match is_default_child_storage_key(&k.0) { + true => None, + false => Some((k.0, v.0)), + } + })); sp.stop_with_message(format!( "✅ Inserted keys into DB ({:.2}s)", start.elapsed().as_secs_f32() @@ -719,7 +761,7 @@ where async fn load_child_remote( &self, top_kv: &[KeyValue], - pending_ext: &mut TestExternalities, + pending_ext: &mut TestExternalities>, ) -> Result { let child_roots = top_kv .into_iter() @@ -727,6 +769,7 @@ where .collect::>(); if child_roots.is_empty() { + info!(target: LOG_TARGET, "👩‍👦 no child roots found to scrape",); return Ok(Default::default()) } @@ -776,7 +819,7 @@ where /// cache, we can also optimize further. async fn load_top_remote( &self, - pending_ext: &mut TestExternalities, + pending_ext: &mut TestExternalities>, ) -> Result { let config = self.as_online(); let at = self @@ -876,7 +919,9 @@ where /// `load_child_remote`. /// /// Must be called after `init_remote_client`. - async fn load_remote_and_maybe_save(&mut self) -> Result { + async fn load_remote_and_maybe_save( + &mut self, + ) -> Result>, &'static str> { let state_version = StateApi::::runtime_version(self.as_online().rpc_client(), None) .await @@ -898,15 +943,14 @@ where // If we need to save a snapshot, save the raw storage and root hash to the snapshot.
if let Some(path) = self.as_online().state_snapshot.clone().map(|c| c.path) { let (raw_storage, storage_root) = pending_ext.into_raw_snapshot(); - let snapshot = Snapshot:: { + let snapshot = Snapshot::::new( state_version, - block_hash: self - .as_online() + self.as_online() .at .expect("set to `Some` in `init_remote_client`; must be called before; qed"), - raw_storage: raw_storage.clone(), + raw_storage.clone(), storage_root, - }; + ); let encoded = snapshot.encode(); log::info!( target: LOG_TARGET, @@ -917,24 +961,16 @@ where std::fs::write(path, encoded).map_err(|_| "fs::write failed")?; // pending_ext was consumed when creating the snapshot, need to reinitailize it - let mut pending_ext = TestExternalities::new_with_code_and_state( - Default::default(), - Default::default(), + return Ok(TestExternalities::from_raw_snapshot( + raw_storage, + storage_root, self.overwrite_state_version.unwrap_or(state_version), - ); - pending_ext.from_raw_snapshot(raw_storage, storage_root); - return Ok(pending_ext) + )) } Ok(pending_ext) } - fn load_snapshot(&mut self, path: PathBuf) -> Result, &'static str> { - info!(target: LOG_TARGET, "loading data from snapshot {:?}", path); - let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; - Decode::decode(&mut &*bytes).map_err(|_| "decode failed") - } - async fn do_load_remote(&mut self) -> Result, &'static str> { self.init_remote_client().await?; let block_hash = self.as_online().at_expected(); @@ -948,15 +984,15 @@ where ) -> Result, &'static str> { let mut sp = Spinner::with_timer(Spinners::Dots, "Loading snapshot...".into()); let start = Instant::now(); - let Snapshot { block_hash, state_version, raw_storage, storage_root } = - self.load_snapshot(config.state_snapshot.path.clone())?; + info!(target: LOG_TARGET, "Loading snapshot from {:?}", &config.state_snapshot.path); + let Snapshot { snapshot_version: _, block_hash, state_version, raw_storage, storage_root } = + Snapshot::::load(&config.state_snapshot.path)?; - let mut 
inner_ext = TestExternalities::new_with_code_and_state( - Default::default(), - Default::default(), + let inner_ext = TestExternalities::from_raw_snapshot( + raw_storage, + storage_root, self.overwrite_state_version.unwrap_or(state_version), ); - inner_ext.from_raw_snapshot(raw_storage, storage_root); sp.stop_with_message(format!("✅ Loaded snapshot ({:.2}s)", start.elapsed().as_secs_f32())); Ok(RemoteExternalities { inner_ext, block_hash }) @@ -1055,17 +1091,12 @@ where #[cfg(test)] mod test_prelude { - use tracing_subscriber::EnvFilter; - pub(crate) use super::*; pub(crate) use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; pub(crate) type Block = RawBlock>; pub(crate) fn init_logger() { - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .with_level(true) - .try_init(); + let _ = sp_tracing::try_init_simple(); } } @@ -1190,6 +1221,42 @@ mod remote_tests { assert_eq!(ext.block_hash, cached_ext.block_hash); } + #[tokio::test] + async fn child_keys_are_loaded() { + const CACHE: &'static str = "snapshot_retains_storage"; + init_logger(); + + // create an ext with children keys + let child_ext = Builder::::new() + .mode(Mode::Online(OnlineConfig { + pallets: vec!["Proxy".to_owned()], + child_trie: true, + state_snapshot: Some(SnapshotConfig::new(CACHE)), + ..Default::default() + })) + .build() + .await + .unwrap(); + + // create an ext without children keys + let ext = Builder::::new() + .mode(Mode::Online(OnlineConfig { + pallets: vec!["Proxy".to_owned()], + child_trie: false, + state_snapshot: Some(SnapshotConfig::new(CACHE)), + ..Default::default() + })) + .build() + .await + .unwrap(); + + // there should be more keys in the child ext. 
+ assert!( + child_ext.as_backend().backend_storage().keys().len() > + ext.as_backend().backend_storage().keys().len() + ); + } + #[tokio::test] async fn offline_else_online_works() { const CACHE: &'static str = "offline_else_online_works_data"; @@ -1289,9 +1356,6 @@ mod remote_tests { .filter(|p| p.path().file_name().unwrap_or_default() == CACHE) .collect::>(); - let snap: Snapshot = Builder::::new().load_snapshot(CACHE.into()).unwrap(); - assert!(matches!(snap, Snapshot { raw_storage, .. } if raw_storage.len() > 0)); - assert!(to_delete.len() == 1); let to_delete = to_delete.first().unwrap(); assert!(std::fs::metadata(to_delete.path()).unwrap().size() > 1); @@ -1321,9 +1385,6 @@ mod remote_tests { .filter(|p| p.path().file_name().unwrap_or_default() == CACHE) .collect::>(); - let snap: Snapshot = Builder::::new().load_snapshot(CACHE.into()).unwrap(); - assert!(matches!(snap, Snapshot { raw_storage, .. } if raw_storage.len() > 0)); - assert!(to_delete.len() == 1); let to_delete = to_delete.first().unwrap(); assert!(std::fs::metadata(to_delete.path()).unwrap().size() > 1); diff --git a/utils/frame/remote-externalities/test_data/proxy_test b/utils/frame/remote-externalities/test_data/proxy_test index f749531a8a9d7..f0b1b4f5af40b 100644 Binary files a/utils/frame/remote-externalities/test_data/proxy_test and b/utils/frame/remote-externalities/test_data/proxy_test differ diff --git a/utils/frame/rpc/client/Cargo.toml b/utils/frame/rpc/client/Cargo.toml index ee9982971cee3..d39fbbdf48603 100644 --- a/utils/frame/rpc/client/Cargo.toml +++ b/utils/frame/rpc/client/Cargo.toml @@ -16,7 +16,7 @@ jsonrpsee = { version = "0.16.2", features = ["ws-client"] } sc-rpc-api = { version = "0.10.0-dev", path = "../../../../client/rpc-api" } async-trait = "0.1.57" serde = "1" -sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../../../primitives/runtime" } log = "0.4" [dev-dependencies] diff --git 
a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index 91f50014667cc..9eee52aacba76 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -13,10 +13,8 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } serde = { version = "1", features = ["derive"] } -log = { version = "0.4.17", default-features = false } sp-core = { path = "../../../../primitives/core" } sp-state-machine = { path = "../../../../primitives/state-machine" } @@ -28,7 +26,7 @@ jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] # Substrate Dependencies sc-client-api = { version = "4.0.0-dev", path = "../../../../client/api" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../../client/rpc-api" } -sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../../../primitives/runtime" } [dev-dependencies] serde_json = "1" diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index d75d3a5af5da4..032840d457c35 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -15,17 +15,17 @@ description = "Substrate RPC for FRAME's support" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.16.2", features = ["jsonrpsee-types"] } serde = "1" frame-support = { version = "4.0.0-dev", path = "../../../../frame/support" } sc-rpc-api = { version = "0.10.0-dev", path = 
"../../../../client/rpc-api" } -sp-storage = { version = "7.0.0", path = "../../../../primitives/storage" } +sp-storage = { version = "13.0.0", path = "../../../../primitives/storage" } [dev-dependencies] scale-info = "2.1.1" jsonrpsee = { version = "0.16.2", features = ["ws-client", "jsonrpsee-types"] } tokio = "1.22.0" -sp-core = { version = "7.0.0", path = "../../../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../../../primitives/runtime" } frame-system = { version = "4.0.0-dev", path = "../../../../frame/system" } diff --git a/utils/frame/rpc/support/src/lib.rs b/utils/frame/rpc/support/src/lib.rs index eecc80c408efd..2d8e45cbfc69f 100644 --- a/utils/frame/rpc/support/src/lib.rs +++ b/utils/frame/rpc/support/src/lib.rs @@ -40,12 +40,9 @@ use sp_storage::{StorageData, StorageKey}; /// # use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; /// # /// # construct_runtime!( -/// # pub enum TestRuntime where -/// # Block = frame_system::mocking::MockBlock, -/// # NodeBlock = frame_system::mocking::MockBlock, -/// # UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic, +/// # pub enum TestRuntime /// # { -/// # System: frame_system::{Pallet, Call, Config, Storage, Event}, +/// # System: frame_system::{Pallet, Call, Config, Storage, Event}, /// # Test: pallet_test::{Pallet, Storage}, /// # } /// # ); @@ -58,13 +55,12 @@ use sp_storage::{StorageData, StorageKey}; /// # type BlockLength = (); /// # type RuntimeOrigin = RuntimeOrigin; /// # type RuntimeCall = RuntimeCall; -/// # type Index = u64; -/// # type BlockNumber = u64; +/// # type Nonce = u64; /// # type Hash = Hash; /// # type Hashing = BlakeTwo256; /// # type AccountId = u64; /// # type Lookup = IdentityLookup; -/// # type Header = Header; +/// # type Block = frame_system::mocking::MockBlock; /// # type 
RuntimeEvent = RuntimeEvent; /// # type BlockHashCount = (); /// # type DbWeight = (); @@ -94,7 +90,7 @@ use sp_storage::{StorageData, StorageKey}; /// use frame_support::pallet_prelude::*; /// /// #[pallet::pallet] -/// pub struct Pallet(PhantomData); +/// pub struct Pallet(_); /// /// #[pallet::config] /// pub trait Config: frame_system::Config {} diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index b6848ceb2911e..f93f32ad64f2e 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2" } +codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } futures = "0.3.21" log = "0.4.17" @@ -23,12 +23,12 @@ sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../../client/tr sp-api = { version = "4.0.0-dev", path = "../../../../primitives/api" } sp-block-builder = { version = "4.0.0-dev", path = "../../../../primitives/block-builder" } sp-blockchain = { version = "4.0.0-dev", path = "../../../../primitives/blockchain" } -sp-core = { version = "7.0.0", path = "../../../../primitives/core" } -sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" } +sp-core = { version = "21.0.0", path = "../../../../primitives/core" } +sp-runtime = { version = "24.0.0", path = "../../../../primitives/runtime" } [dev-dependencies] sc-transaction-pool = { version = "4.0.0-dev", path = "../../../../client/transaction-pool" } tokio = "1.22.0" assert_matches = "1.3.0" -sp-tracing = { version = "6.0.0", path = "../../../../primitives/tracing" } +sp-tracing = { version = "10.0.0", path = "../../../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } diff --git 
a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index 26efa02970efe..1eff71e3390a3 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -38,14 +38,14 @@ pub use frame_system_rpc_runtime_api::AccountNonceApi; /// System RPC methods. #[rpc(client, server)] -pub trait SystemApi { +pub trait SystemApi { /// Returns the next valid index (aka nonce) for given account. /// /// This method takes into consideration all pending transactions /// currently in the pool and if no transactions are found in the pool /// it fallbacks to query the index from the runtime (aka. state nonce). #[method(name = "system_accountNextIndex", aliases = ["account_nextIndex"])] - async fn nonce(&self, account: AccountId) -> RpcResult; + async fn nonce(&self, account: AccountId) -> RpcResult; /// Dry run an extrinsic at a given block. Return SCALE encoded ApplyExtrinsicResult. #[method(name = "system_dryRun", aliases = ["system_dryRunAt"])] @@ -85,20 +85,20 @@ impl System { } #[async_trait] -impl - SystemApiServer<::Hash, AccountId, Index> for System +impl + SystemApiServer<::Hash, AccountId, Nonce> for System where C: sp_api::ProvideRuntimeApi, C: HeaderBackend, C: Send + Sync + 'static, - C::Api: AccountNonceApi, + C::Api: AccountNonceApi, C::Api: BlockBuilder, P: TransactionPool + 'static, Block: traits::Block, AccountId: Clone + Display + Codec + Send + 'static, - Index: Clone + Display + Codec + Send + traits::AtLeast32Bit + 'static, + Nonce: Clone + Display + Codec + Send + traits::AtLeast32Bit + 'static, { - async fn nonce(&self, account: AccountId) -> RpcResult { + async fn nonce(&self, account: AccountId) -> RpcResult { let api = self.client.runtime_api(); let best = self.client.info().best_hash; @@ -176,11 +176,11 @@ where /// Adjust account nonce from state, so that tx with the nonce will be /// placed after all ready txpool transactions. 
-fn adjust_nonce(pool: &P, account: AccountId, nonce: Index) -> Index +fn adjust_nonce(pool: &P, account: AccountId, nonce: Nonce) -> Nonce where P: TransactionPool, AccountId: Clone + std::fmt::Display + Encode, - Index: Clone + std::fmt::Display + Encode + traits::AtLeast32Bit + 'static, + Nonce: Clone + std::fmt::Display + Encode + traits::AtLeast32Bit + 'static, { log::debug!(target: "rpc", "State nonce for {}: {}", account, nonce); // Now we need to query the transaction pool diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index 7107cb15a0788..84b9460d137e0 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -15,23 +15,22 @@ targets = ["x86_64-unknown-linux-gnu"] remote-externalities = { version = "0.10.0-dev", path = "../../remote-externalities", package = "frame-remote-externalities" } sc-cli = { version = "0.10.0-dev", path = "../../../../client/cli" } sc-executor = { version = "0.10.0-dev", path = "../../../../client/executor" } -sc-service = { version = "0.10.0-dev", default-features = false, path = "../../../../client/service" } sp-consensus-aura = { path = "../../../../primitives/consensus/aura" } sp-consensus-babe = { path = "../../../../primitives/consensus/babe" } -sp-core = { version = "7.0.0", path = "../../../../primitives/core" } -sp-externalities = { version = "0.13.0", path = "../../../../primitives/externalities" } +sp-core = { version = "21.0.0", path = "../../../../primitives/core" } +sp-externalities = { version = "0.19.0", path = "../../../../primitives/externalities" } sp-inherents = { path = "../../../../primitives/inherents" } -sp-io = { version = "7.0.0", path = "../../../../primitives/io" } -sp-keystore = { version = "0.13.0", path = "../../../../primitives/keystore" } -sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" } +sp-io = { version = "23.0.0", path = "../../../../primitives/io" } +sp-keystore = { version = 
"0.27.0", path = "../../../../primitives/keystore" } +sp-runtime = { version = "24.0.0", path = "../../../../primitives/runtime" } sp-rpc = { version = "6.0.0", path = "../../../../primitives/rpc" } -sp-state-machine = { version = "0.13.0", path = "../../../../primitives/state-machine" } +sp-state-machine = { version = "0.28.0", path = "../../../../primitives/state-machine" } sp-timestamp = { path = "../../../../primitives/timestamp" } sp-transaction-storage-proof = { path = "../../../../primitives/transaction-storage-proof" } -sp-version = { version = "5.0.0", path = "../../../../primitives/version" } +sp-version = { version = "22.0.0", path = "../../../../primitives/version" } sp-debug-derive = { path = "../../../../primitives/debug-derive" } sp-api = { path = "../../../../primitives/api" } -sp-weights = { version = "4.0.0", path = "../../../../primitives/weights" } +sp-weights = { version = "20.0.0", path = "../../../../primitives/weights" } frame-try-runtime = { optional = true, path = "../../../../frame/try-runtime" } substrate-rpc-client = { path = "../../rpc/client" } @@ -39,16 +38,23 @@ async-trait = "0.1.57" clap = { version = "4.2.5", features = ["derive"] } hex = { version = "0.4.3", default-features = false } log = "0.4.17" -parity-scale-codec = "3.2.2" -serde = "1.0.136" +parity-scale-codec = "3.6.1" +serde = "1.0.163" serde_json = "1.0.85" zstd = { version = "0.12.3", default-features = false } [dev-dependencies] assert_cmd = "2.0.10" +node-primitives = { path = "../../../../bin/node/primitives" } regex = "1.7.3" substrate-cli-test-utils = { path = "../../../../test-utils/cli" } +tempfile = "3.1.0" tokio = "1.27.0" [features] -try-runtime = ["sp-debug-derive/force-debug", "frame-try-runtime/try-runtime"] +try-runtime = [ + "sp-debug-derive/force-debug", + "frame-try-runtime/try-runtime", + "sp-runtime/try-runtime", + "substrate-cli-test-utils/try-runtime" +] diff --git a/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs 
b/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs index 87855c1d6bf0d..102336d644219 100644 --- a/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs +++ b/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs @@ -41,7 +41,7 @@ pub(crate) async fn create_snapshot( ) -> sc_cli::Result<()> where Block: BlockT + serde::de::DeserializeOwned, - Block::Hash: FromStr + serde::de::DeserializeOwned, + Block::Hash: serde::de::DeserializeOwned, Block::Header: serde::de::DeserializeOwned, ::Err: Debug, NumberFor: FromStr, diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index 48dab6b9bd1b3..1f1b6ec7d9b91 100644 --- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -68,7 +68,6 @@ pub struct ExecuteBlockCmd { impl ExecuteBlockCmd { fn block_ws_uri(&self) -> String where - Block::Hash: FromStr, ::Err: Debug, { match (&self.block_ws_uri, &self.state) { @@ -91,7 +90,6 @@ pub(crate) async fn execute_block( ) -> sc_cli::Result<()> where Block: BlockT + serde::de::DeserializeOwned, - Block::Hash: FromStr, ::Err: Debug, Block::Hash: serde::de::DeserializeOwned, Block::Header: serde::de::DeserializeOwned, diff --git a/utils/frame/try-runtime/cli/src/commands/fast_forward.rs b/utils/frame/try-runtime/cli/src/commands/fast_forward.rs index 0c517c02fbe81..f1dee16debe73 100644 --- a/utils/frame/try-runtime/cli/src/commands/fast_forward.rs +++ b/utils/frame/try-runtime/cli/src/commands/fast_forward.rs @@ -25,11 +25,11 @@ use sc_executor::{sp_wasm_interface::HostFunctions, WasmExecutor}; use serde::de::DeserializeOwned; use sp_core::H256; use sp_inherents::{InherentData, InherentDataProvider}; -use sp_io::TestExternalities; use sp_runtime::{ - traits::{Header, NumberFor, One}, + traits::{HashingFor, Header, NumberFor, One}, Digest, }; +use sp_state_machine::TestExternalities; use std::{fmt::Debug, 
str::FromStr}; use substrate_rpc_client::{ws_client, ChainApi}; @@ -92,8 +92,8 @@ where } /// Call `method` with `data` and return the result. `externalities` will not change. -async fn dry_run( - externalities: &TestExternalities, +fn dry_run( + externalities: &TestExternalities>, executor: &WasmExecutor, method: &'static str, data: &[u8], @@ -111,7 +111,7 @@ async fn dry_run( /// Call `method` with `data` and actually save storage changes to `externalities`. async fn run( - externalities: &mut TestExternalities, + externalities: &mut TestExternalities>, executor: &WasmExecutor, method: &'static str, data: &[u8], @@ -124,11 +124,8 @@ async fn run( full_extensions(executor.clone()), )?; - let storage_changes = changes.drain_storage_changes( - &externalities.backend, - &mut Default::default(), - externalities.state_version, - )?; + let storage_changes = + changes.drain_storage_changes(&externalities.backend, externalities.state_version)?; externalities .backend @@ -143,7 +140,7 @@ async fn next_empty_block< HostFns: HostFunctions, BBIP: BlockBuildingInfoProvider>, >( - externalities: &mut TestExternalities, + externalities: &mut TestExternalities>, executor: &WasmExecutor, parent_height: NumberFor, parent_hash: Block::Hash, @@ -182,8 +179,7 @@ async fn next_empty_block< executor, "BlockBuilder_inherent_extrinsics", &inherent_data.encode(), - ) - .await?; + )?; } for xt in &extrinsics { @@ -196,8 +192,7 @@ async fn next_empty_block< executor, "BlockBuilder_finalize_block", &[0u8; 0], - ) - .await?; + )?; run::(externalities, executor, "BlockBuilder_finalize_block", &[0u8; 0]).await?; @@ -211,7 +206,6 @@ pub(crate) async fn fast_forward( ) -> Result<()> where Block: BlockT + DeserializeOwned, - Block::Hash: FromStr, Block::Header: DeserializeOwned, ::Err: Debug, NumberFor: FromStr, diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index d5c68978eb61a..53db5e6434632 100644 --- 
a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -84,7 +84,6 @@ pub(crate) async fn follow_chain( ) -> sc_cli::Result<()> where Block: BlockT + DeserializeOwned, - Block::Hash: FromStr, Block::Header: DeserializeOwned, ::Err: Debug, NumberFor: FromStr, @@ -178,7 +177,6 @@ where let storage_changes = changes .drain_storage_changes( &state_ext.backend, - &mut Default::default(), // Note that in case a block contains a runtime upgrade, state version could // potentially be incorrect here, this is very niche and would only result in // unaligned roots, so this use case is ignored for now. diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs index 4da6f07836cd1..ac95384fb8aa5 100644 --- a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -46,7 +46,6 @@ pub struct OffchainWorkerCmd { impl OffchainWorkerCmd { fn header_ws_uri(&self) -> String where - Block::Hash: FromStr, ::Err: Debug, { match (&self.header_ws_uri, &self.state) { @@ -70,7 +69,6 @@ pub(crate) async fn offchain_worker( where Block: BlockT + serde::de::DeserializeOwned, Block::Header: serde::de::DeserializeOwned, - Block::Hash: FromStr, ::Err: Debug, NumberFor: FromStr, as FromStr>::Err: Debug, diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs index 948ff9c030ea9..67988a3d1aada 100644 --- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -32,9 +32,10 @@ pub struct OnRuntimeUpgradeCmd { /// Select which optional checks to perform. Selects all when no value is given. /// - /// - `none`: Perform no checks (default when the arg is not present). 
- /// - `all`: Perform all checks (default when the arg is present). - /// - `pre-and-post`: Perform pre- and post-upgrade checks. + /// - `none`: Perform no checks. + /// - `all`: Perform all checks (default when --checks is present with no value). + /// - `pre-and-post`: Perform pre- and post-upgrade checks (default when the arg is not + /// present). /// - `try-state`: Perform the try-state checks. /// /// Performing any checks will potentially invalidate the measured PoV/Weight. @@ -54,7 +55,6 @@ pub(crate) async fn on_runtime_upgrade( ) -> sc_cli::Result<()> where Block: BlockT + serde::de::DeserializeOwned, - Block::Hash: FromStr, ::Err: Debug, Block::Header: serde::de::DeserializeOwned, NumberFor: FromStr, diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index 9268ef2edba8b..73952ce816af4 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -17,331 +17,10 @@ //! # Try-runtime //! -//! Substrate's ultimate testing framework for the power users. +//! Substrate's `try-runtime` subcommand has been migrated to a [standalone +//! CLI](https://github.com/paritytech/try-runtime-cli). //! -//! > As the name suggests, `try-runtime` is a detailed testing framework that gives you a lot of -//! control over what is being executed in which environment. It is recommended that user's first -//! familiarize themselves with substrate in depth, particularly the execution model. It is critical -//! to deeply understand how the wasm/client/runtime interactions, and the runtime apis work in the -//! substrate runtime, before commencing to working with `try-runtime`. -//! -//! #### Resources -//! -//! Some resources about the above: -//! -//! 1. -//! 2. -//! 3. -//! -//! --- -//! -//! ## Background Knowledge -//! -//! The basis of all try-runtime commands is the same: connect to a live node, scrape its *state* -//! 
and put it inside a `TestExternalities`, then call into a *specific runtime-api* using the given -//! state and some *runtime*. -//! -//! Alternatively, the state could come from a snapshot file. -//! -//! All of the variables in the above statement are made *italic*. Let's look at each of them: -//! -//! 1. **State** is the key-value pairs of data that comprise the canonical information that any -//! blockchain is keeping. A state can be full (all key-value pairs), or be partial (only pairs -//! related to some pallets/prefixes). Moreover, some keys are especial and are not related to -//! specific pallets, known as [`well_known_keys`] in substrate. The most important of these is -//! the `:CODE:` key, which contains the code used for execution, when wasm execution is chosen. -//! -//! 2. *A runtime-api* call is a call into a function defined in the runtime, *on top of a given -//! state*. Each subcommand of `try-runtime` utilizes a specific *runtime-api*. -//! -//! 3. Finally, the **runtime** is the actual code that is used to execute the aforementioned -//! runtime-api. Everything in this crate assumes wasm execution, which means the runtime that -//! you use is the one stored onchain, namely under the `:CODE:` key. -//! -//! To recap, a typical try-runtime command does the following: -//! -//! 1. Download the state of a live chain, and write to an `externalities`. -//! 2. Overwrite the `:CODE:` with a given wasm blob -//! 3. Test some functionality via calling a runtime-api. -//! -//! ## Usage -//! -//! To use any of the provided commands, [`SharedParams`] must be provided. The most important of -//! which being [`SharedParams::runtime`], which specifies which runtime to use. Furthermore, -//! [`SharedParams::overwrite_state_version`] can be used to alter the state-version (see -//! for more info). -//! -//! Then, the specific command has to be specified. See [`Command`] for more information about each -//! 
command's specific customization flags, and assumptions regarding the runtime being used. -//! -//! Said briefly, this CLI is capable of executing: -//! -//! * [`Command::OnRuntimeUpgrade`]: execute all the `on_runtime_upgrade` hooks. -//! * [`Command::ExecuteBlock`]: re-execute the given block. -//! * [`Command::OffchainWorker`]: re-execute the given block's offchain worker code path. -//! * [`Command::FollowChain`]: continuously execute the blocks of a remote chain on top of a given -//! runtime. -//! * [`Command::CreateSnapshot`]: Create a snapshot file from a remote node. -//! -//! Finally, To make sure there are no errors regarding this, always run any `try-runtime` command -//! with `executor=trace` logging targets, which will specify which runtime is being used per api -//! call. Moreover, `remote-ext`, `try-runtime` and `runtime` logs targets will also be useful. -//! -//! ## Spec name check -//! -//! A common pitfall is that you might be running some test on top of the state of chain `x`, with -//! the runtime of chain `y`. To avoid this all commands do a spec-name check before executing -//! anything by default. This will check the, if any alterations are being made to the `:CODE:`, -//! then the spec names match. The spec versions are warned, but are not mandated to match. -//! -//! > If anything, in most cases, we expect spec-versions to NOT match, because try-runtime is all -//! > about testing unreleased runtimes. -//! -//! ## Note on signature and state-root checks -//! -//! All of the commands calling into `TryRuntime_execute_block` ([`Command::ExecuteBlock`] and -//! [`Command::FollowChain`]) disable both state root and signature checks. This is because in 99% -//! of the cases, the runtime that is being tested is different from the one that is stored in the -//! canonical chain state. This implies: -//! -//! 1. the state root will NEVER match, because `:CODE:` is different between the two. -//! 2. 
replaying all transactions will fail, because the spec-version is part of the transaction -//! signature. -//! -//! ## Best Practices -//! -//! Try-runtime is all about battle-testing unreleased runtime. The following list of suggestions -//! help developers maximize the testing coverage and make base use of `try-runtime`. -//! -//! #### Adding pre/post hooks -//! -//! One of the gems that come only in the `try-runtime` feature flag is the `pre_upgrade` and -//! `post_upgrade` hooks for `OnRuntimeUpgrade`. This trait is implemented either inside the pallet, -//! or manually in a runtime, to define a migration. In both cases, these functions can be added, -//! given the right flag: -//! -//! ```ignore -//! #[cfg(feature = "try-runtime")] -//! fn pre_upgrade() -> Result, &'static str> {} -//! -//! #[cfg(feature = "try-runtime")] -//! fn post_upgrade(state: Vec) -> Result<(), &'static str> {} -//! ``` -//! -//! (The pallet macro syntax will support this simply as a part of `#[pallet::hooks]`). -//! -//! These hooks allow you to execute some code, only within the `on-runtime-upgrade` command, before -//! and after the migration. Moreover, `pre_upgrade` can return a `Vec` that contains arbitrary -//! encoded data (usually some pre-upgrade state) which will be passed to `post_upgrade` after -//! upgrading and used for post checking. -//! -//! ## State Consistency -//! -//! Similarly, each pallet can expose a function in `#[pallet::hooks]` section as follows: -//! -//! ```ignore -//! #[cfg(feature = "try-runtime")] -//! fn try_state(_: BlockNumber) -> Result<(), &'static str> {} -//! ``` -//! -//! which is called on numerous code paths in the try-runtime tool. These checks should ensure that -//! the state of the pallet is consistent and correct. See `frame_support::try_runtime::TryState` -//! for more info. -//! -//! #### Logging -//! -//! It is super helpful to make sure your migration code uses logging (always with a `runtime` log -//! target prefix, e.g. 
`runtime::balance`) and state exactly at which stage it is, and what it is -//! doing. -//! -//! #### Guarding migrations -//! -//! Always make sure that any migration code is guarded either by `StorageVersion`, or by some -//! custom storage item, so that it is NEVER executed twice, even if the code lives in two -//! consecutive runtimes. -//! -//! ## Examples -//! -//! For the following examples, we assume the existence of the following: -//! -//! 1. a substrate node compiled without `--feature try-runtime`, called `substrate`. This will be -//! the running node that you connect to. then, after some changes to this node, you compile it with -//! `--features try-runtime`. This gives you: -//! 2. a substrate binary that has the try-runtime sub-command enabled. -//! 3. a wasm blob that has try-runtime functionality. -//! -//! ```bash -//! # this is like your running deployed node. -//! cargo build --release && cp target/release/substrate . -//! -//! # this is like your WIP branch. -//! cargo build --release --features try-runtime -//! cp target/release/substrate substrate-try-runtime -//! cp ./target/release/wbuild/kitchensink-runtime/kitchensink_runtime.wasm runtime-try-runtime.wasm -//! ``` -//! -//! > The above example is with `substrate`'s `kitchensink-runtime`, but is applicable to any -//! > substrate-based chain that has implemented `try-runtime-cli`. -//! -//! * If you run `try-runtime` subcommand against `substrate` binary listed above, you get the -//! following error. -//! -//! ```bash -//! [substrate] ./substrate try-runtime -//! Error: Input("TryRuntime wasn't enabled when building the node. You can enable it with `--features try-runtime`.") -//! ``` -//! -//! * If you run the same against `substrate-try-runtime`, it will work. -//! -//! ```bash -//! [substrate] ./substrate-try-runtime try-runtime -//! Try some command against runtime state -//! -//! Usage: substrate-try-runtime try-runtime [OPTIONS] --runtime -//! -//! Commands: -//! 
on-runtime-upgrade Execute the migrations of the "local runtime" -//! execute-block Executes the given block against some state -//! offchain-worker Executes *the offchain worker hooks* of a given block against some state -//! follow-chain Follow the given chain's finalized blocks and apply all of its extrinsics -//! create-snapshot Create a new snapshot file -//! help Print this message or the help of the given subcommand(s) -//! -//! Options: -//! --chain -//! Specify the chain specification -//! --dev -//! Specify the development chain -//! -d, --base-path -//! Specify custom base path -//! -l, --log ... -//! Sets a custom logging filter. Syntax is `=`, e.g. -lsync=debug -//! --detailed-log-output -//! Enable detailed log output -//! --disable-log-color -//! Disable log color output -//! --enable-log-reloading -//! Enable feature to dynamically update and reload the log filter -//! --tracing-targets -//! Sets a custom profiling filter. Syntax is the same as for logging: `=` -//! --tracing-receiver -//! Receiver to process tracing messages [default: log] [possible values: log] -//! --runtime -//! The runtime to use -//! --wasm-execution -//! Type of wasm execution used [default: compiled] [possible values: interpreted-i-know-what-i-do, compiled] -//! --wasm-instantiation-strategy -//! The WASM instantiation method to use [default: pooling-copy-on-write] [possible values: pooling-copy-on-write, recreate-instance-copy-on-write, pooling, recreate-instance, legacy-instance-reuse] -//! --heap-pages -//! The number of 64KB pages to allocate for Wasm execution. Defaults to [`sc_service::Configuration.default_heap_pages`] -//! --overwrite-state-version -//! Overwrite the `state_version` -//! -h, --help -//! Print help information (use `--help` for more detail) -//! -V, --version -//! Print version information -//! ``` -//! -//! * Run the migrations of a given runtime on top of a live state. -//! -//! ```bash -//! 
# assuming there's `./substrate --dev --tmp --ws-port 9999` or similar running. -//! ./substrate-try-runtime \ -//! try-runtime \ -//! --runtime kitchensink_runtime.wasm \ -//! -lruntime=debug \ -//! on-runtime-upgrade \ -//! live --uri ws://localhost:9999 -//! ``` -//! -//! * Same as the previous one, but run it at specific block number's state. This means that this -//! block hash's state shall not yet have been pruned in `rpc.polkadot.io`. -//! -//! ```bash -//! ./substrate-try-runtime \ -//! try-runtime \ -//! --runtime kitchensink_runtime.wasm \ -//! -lruntime=debug \ -//! on-runtime-upgrade \ -//! live --uri ws://localhost:9999 \ -//! # replace with your desired block hash! -//! --at 0xa1b16c1efd889a9f17375ec4dd5c1b4351a2be17fa069564fced10d23b9b3836 -//! ``` -//! -//! * Executing the same command with the [`Runtime::Existing`] will fail because the existing -//! runtime, stored onchain in `substrate` binary that we compiled earlier does not have -//! `try-runtime` feature! -//! -//! ```bash -//! ./substrate-try-runtime try-runtime --runtime existing -lruntime=debug on-runtime-upgrade live --uri ws://localhost:9999 -//! ... -//! Error: Input("given runtime is NOT compiled with try-runtime feature!") -//! ``` -//! -//! * Now, let's use a snapshot file. First, we create the snapshot: -//! -//! ```bash -//! ./substrate-try-runtime try-runtime --runtime existing -lruntime=debug create-snapshot --uri ws://localhost:9999 -//! 2022-12-13 10:28:17.516 INFO main try-runtime::cli: snapshot path not provided (-s), using 'node-268@latest.snap' -//! 2022-12-13 10:28:17.516 INFO main remote-ext: since no at is provided, setting it to latest finalized head, 0xe7d0b614dfe89af65b33577aae46a6f958c974bf52f8a5e865a0f4faeb578d22 -//! 2022-12-13 10:28:17.516 INFO main remote-ext: since no prefix is filtered, the data for all pallets will be downloaded -//! 2022-12-13 10:28:17.550 INFO main remote-ext: writing snapshot of 1611464 bytes to "node-268@latest.snap" -//! 
2022-12-13 10:28:17.551 INFO main remote-ext: initialized state externalities with storage root 0x925e4e95de4c08474fb7f976c4472fa9b8a1091619cd7820a793bf796ee6d932 and state_version V1 -//! ``` -//! -//! > Note that the snapshot contains the `existing` runtime, which does not have the correct -//! > `try-runtime` feature. In the following commands, we still need to overwrite the runtime. -//! -//! Then, we can use it to have the same command as before, `on-runtime-upgrade` -//! -//! ```bash -//! try-runtime \ -//! --runtime runtime-try-runtime.wasm \ -//! -lruntime=debug \ -//! on-runtime-upgrade \ -//! snap -s node-268@latest.snap -//! ``` -//! -//! * Execute the latest finalized block with the given runtime. -//! -//! ```bash -//! ./substrate-try-runtime try-runtime \ -//! --runtime runtime-try-runtime.wasm \ -//! -lruntime=debug \ -//! execute-block live \ -//! --uri ws://localhost:999 -//! ``` -//! -//! This can still be customized at a given block with `--at`. If you want to use a snapshot, you -//! can still use `--block-ws-uri` to provide a node form which the block data can be fetched. -//! -//! Moreover, this runs the `frame_support::try_runtime::TryState` hooks as well. The hooks to run -//! can be customized with the `--try-state`. For example: -//! -//! ```bash -//! ./substrate-try-runtime try-runtime \ -//! --runtime runtime-try-runtime.wasm \ -//! -lruntime=debug \ -//! execute-block live \ -//! --try-state System,Staking \ -//! --uri ws://localhost:999 -//! ``` -//! -//! Will only run the `try-state` of the two given pallets. See -//! [`frame_try_runtime::TryStateSelect`] for more information. -//! -//! * Follow our live chain's blocks using `follow-chain`, whilst running the try-state of 3 pallets -//! in a round robin fashion -//! -//! ```bash -//! ./substrate-try-runtime \ -//! try-runtime \ -//! --runtime runtime-try-runtime.wasm \ -//! -lruntime=debug \ -//! follow-chain \ -//! --uri ws://localhost:9999 \ -//! --try-state rr-3 -//! ``` +//! 
It is no longer maintained here and will be removed in the future. #![cfg(feature = "try-runtime")] @@ -349,7 +28,6 @@ use crate::block_building_info::BlockBuildingInfoProvider; use parity_scale_codec::Decode; use remote_externalities::{ Builder, Mode, OfflineConfig, OnlineConfig, RemoteExternalities, SnapshotConfig, - TestExternalities, }; use sc_cli::{ execution_method_from_cli, CliConfiguration, RuntimeVersion, WasmExecutionMethod, @@ -359,7 +37,6 @@ use sc_cli::{ use sc_executor::{ sp_wasm_interface::HostFunctions, HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY, }; -use sp_api::HashT; use sp_core::{ hexdisplay::HexDisplay, offchain::{ @@ -374,10 +51,12 @@ use sp_externalities::Extensions; use sp_inherents::InherentData; use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; use sp_runtime::{ - traits::{BlakeTwo256, Block as BlockT, NumberFor}, + traits::{BlakeTwo256, Block as BlockT, Hash as HashT, HashingFor, NumberFor}, DeserializeOwned, Digest, }; -use sp_state_machine::{CompactProof, OverlayedChanges, StateMachine, TrieBackendBuilder}; +use sp_state_machine::{ + CompactProof, OverlayedChanges, StateMachine, TestExternalities, TrieBackendBuilder, +}; use sp_version::StateVersion; use std::{fmt::Debug, path::PathBuf, str::FromStr}; @@ -611,9 +290,7 @@ impl State { try_runtime_check: bool, ) -> sc_cli::Result> where - Block::Hash: FromStr, Block::Header: DeserializeOwned, - Block::Hash: DeserializeOwned, ::Err: Debug, { let builder = match self { @@ -717,7 +394,13 @@ impl State { } } +pub const DEPRECATION_NOTICE: &str = "Substrate's `try-runtime` subcommand has been migrated to a standalone CLI (https://github.com/paritytech/try-runtime-cli). It is no longer being maintained here and will be removed entirely some time after January 2024. 
Please remove this subcommand from your runtime and use the standalone CLI."; + impl TryRuntimeCmd { + // Can't reuse DEPRECATION_NOTICE in the deprecated macro + #[deprecated( + note = "Substrate's `try-runtime` subcommand has been migrated to a standalone CLI (https://github.com/paritytech/try-runtime-cli). It is no longer being maintained here and will be removed entirely some time after January 2024. Please remove this subcommand from your runtime and use the standalone CLI." + )] pub async fn run( &self, block_building_info_provider: Option, @@ -725,7 +408,6 @@ impl TryRuntimeCmd { where Block: BlockT + DeserializeOwned, Block::Header: DeserializeOwned, - Block::Hash: FromStr, ::Err: Debug, as FromStr>::Err: Debug, as TryInto>::Error: Debug, @@ -791,7 +473,6 @@ impl CliConfiguration for TryRuntimeCmd { /// Get the hash type of the generic `Block` from a `hash_str`. pub(crate) fn hash_of(hash_str: &str) -> sc_cli::Result where - Block::Hash: FromStr, ::Err: Debug, { hash_str @@ -833,7 +514,7 @@ pub(crate) fn build_executor(shared: &SharedParams) -> WasmExe /// Ensure that the given `ext` is compiled with `try-runtime` fn ensure_try_runtime( executor: &WasmExecutor, - ext: &mut TestExternalities, + ext: &mut TestExternalities>, ) -> bool { use sp_api::RuntimeApiInfo; let final_code = ext @@ -851,12 +532,12 @@ fn ensure_try_runtime( /// Execute the given `method` and `data` on top of `ext`, returning the results (encoded) and the /// state `changes`. 
pub(crate) fn state_machine_call( - ext: &TestExternalities, + ext: &TestExternalities>, executor: &WasmExecutor, method: &'static str, data: &[u8], - extensions: Extensions, -) -> sc_cli::Result<(OverlayedChanges, Vec)> { + mut extensions: Extensions, +) -> sc_cli::Result<(OverlayedChanges>, Vec)> { let mut changes = Default::default(); let encoded_results = StateMachine::new( &ext.backend, @@ -864,11 +545,11 @@ pub(crate) fn state_machine_call( executor, method, data, - extensions, + &mut extensions, &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?, CallContext::Offchain, ) - .execute(sp_state_machine::ExecutionStrategy::AlwaysWasm) + .execute() .map_err(|e| format!("failed to execute '{}': {}", method, e)) .map_err::(Into::into)?; @@ -880,13 +561,13 @@ pub(crate) fn state_machine_call( /// /// Make sure [`LOG_TARGET`] is enabled in logging. pub(crate) fn state_machine_call_with_proof( - ext: &TestExternalities, + ext: &TestExternalities>, executor: &WasmExecutor, method: &'static str, data: &[u8], - extensions: Extensions, + mut extensions: Extensions, maybe_export_proof: Option, -) -> sc_cli::Result<(OverlayedChanges, Vec)> { +) -> sc_cli::Result<(OverlayedChanges>, Vec)> { use parity_scale_codec::Encode; let mut changes = Default::default(); @@ -903,11 +584,11 @@ pub(crate) fn state_machine_call_with_proof(Into::into)?; @@ -943,7 +624,7 @@ pub(crate) fn state_machine_call_with_proof(pre_root) + .into_compact_proof::>(pre_root) .map_err(|e| { log::error!(target: LOG_TARGET, "failed to generate compact proof {}: {:?}", method, e); e diff --git a/utils/frame/try-runtime/cli/tests/follow_chain.rs b/utils/frame/try-runtime/cli/tests/follow_chain.rs deleted file mode 100644 index a4961aa280171..0000000000000 --- a/utils/frame/try-runtime/cli/tests/follow_chain.rs +++ /dev/null @@ -1,65 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -#![cfg(unix)] - -#[cfg(feature = "try-runtime")] -mod tests { - use assert_cmd::cargo::cargo_bin; - use regex::Regex; - use std::{ - process::{self}, - time::Duration, - }; - use substrate_cli_test_utils as common; - use tokio::process::{Child, Command}; - - #[tokio::test] - async fn follow_chain_works() { - // Build substrate so binaries used in the test use the latest code. - common::build_substrate(&["--features=try-runtime"]); - - common::run_with_timeout(Duration::from_secs(60), async move { - fn start_follow(ws_url: &str) -> Child { - Command::new(cargo_bin("substrate")) - .stdout(process::Stdio::piped()) - .stderr(process::Stdio::piped()) - .args(&["try-runtime", "--runtime=existing"]) - .args(&["follow-chain", format!("--uri={}", ws_url).as_str()]) - .kill_on_drop(true) - .spawn() - .unwrap() - } - - // Start a node and wait for it to begin finalizing blocks - let mut node = common::KillChildOnDrop(common::start_node()); - let ws_url = common::extract_info_from_output(node.stderr.take().unwrap()).0.ws_url; - common::wait_n_finalized_blocks(1, &ws_url).await; - - // Kick off the follow-chain process and wait for it to process at least 3 blocks. 
- let mut follow = start_follow(&ws_url); - let re = Regex::new(r#".*executed block ([3-9]|[1-9]\d+).*"#).unwrap(); - let matched = - common::wait_for_stream_pattern_match(follow.stderr.take().unwrap(), re).await; - - // Assert that the follow-chain process has followed at least 3 blocks. - assert!(matches!(matched, Ok(_))); - }) - .await; - } -} diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml index 5228f26d57c6d..13f344ebfa3a7 100644 --- a/utils/wasm-builder/Cargo.toml +++ b/utils/wasm-builder/Cargo.toml @@ -22,4 +22,5 @@ toml = "0.7.3" walkdir = "2.3.2" sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../primitives/maybe-compressed-blob" } filetime = "0.2.16" -wasm-opt = "0.112" \ No newline at end of file +wasm-opt = "0.114" +parity-wasm = "0.45" diff --git a/utils/wasm-builder/src/builder.rs b/utils/wasm-builder/src/builder.rs index 72d32445e8da5..208b56077669e 100644 --- a/utils/wasm-builder/src/builder.rs +++ b/utils/wasm-builder/src/builder.rs @@ -48,6 +48,7 @@ impl WasmBuilderSelectProject { file_name: None, project_cargo_toml: get_manifest_dir().join("Cargo.toml"), features_to_enable: Vec::new(), + disable_runtime_version_section_check: false, } } @@ -63,6 +64,7 @@ impl WasmBuilderSelectProject { file_name: None, project_cargo_toml: path, features_to_enable: Vec::new(), + disable_runtime_version_section_check: false, }) } else { Err("Project path must point to the `Cargo.toml` of the project") @@ -78,8 +80,8 @@ impl WasmBuilderSelectProject { /// /// 1. Call [`WasmBuilder::new`] to create a new builder. /// 2. Select the project to build using the methods of [`WasmBuilderSelectProject`]. -/// 3. Set additional `RUST_FLAGS` or a different name for the file containing the WASM code -/// using methods of [`WasmBuilder`]. +/// 3. Set additional `RUST_FLAGS` or a different name for the file containing the WASM code using +/// methods of [`WasmBuilder`]. /// 4. Build the WASM binary using [`Self::build`]. 
pub struct WasmBuilder { /// Flags that should be appended to `RUST_FLAGS` env variable. @@ -93,6 +95,8 @@ pub struct WasmBuilder { project_cargo_toml: PathBuf, /// Features that should be enabled when building the wasm binary. features_to_enable: Vec, + /// Should the builder not check that the `runtime_version` section exists in the wasm binary? + disable_runtime_version_section_check: bool, } impl WasmBuilder { @@ -143,6 +147,17 @@ impl WasmBuilder { self } + /// Disable the check for the `runtime_version` wasm section. + /// + /// By default the `wasm-builder` will ensure that the `runtime_version` section will + /// exists in the build wasm binary. This `runtime_version` section is used to get the + /// `RuntimeVersion` without needing to call into the wasm binary. However, for some + /// use cases (like tests) you may want to disable this check. + pub fn disable_runtime_version_section_check(mut self) -> Self { + self.disable_runtime_version_section_check = true; + self + } + /// Build the WASM binary. pub fn build(self) { let out_dir = PathBuf::from(env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!")); @@ -165,6 +180,7 @@ impl WasmBuilder { self.rust_flags.into_iter().map(|f| format!("{} ", f)).collect(), self.features_to_enable, self.file_name, + !self.disable_runtime_version_section_check, ); // As last step we need to generate our `rerun-if-changed` stuff. If a build fails, we don't @@ -215,7 +231,7 @@ fn generate_rerun_if_changed_instructions() { /// The current project is determined by using the `CARGO_MANIFEST_DIR` environment variable. /// /// `file_name` - The name + path of the file being generated. The file contains the -/// constant `WASM_BINARY`, which contains the built WASM binary. +/// constant `WASM_BINARY`, which contains the built wasm binary. /// /// `project_cargo_toml` - The path to the `Cargo.toml` of the project that should be built. 
/// @@ -224,14 +240,17 @@ fn generate_rerun_if_changed_instructions() { /// `features_to_enable` - Features that should be enabled for the project. /// /// `wasm_binary_name` - The optional wasm binary name that is extended with -/// /// `.compact.compressed.wasm`. If `None`, the project name will be used. +/// +/// `check_for_runtime_version_section` - Should the wasm binary be checked for the +/// `runtime_version` section? fn build_project( file_name: PathBuf, project_cargo_toml: PathBuf, default_rustflags: String, features_to_enable: Vec, wasm_binary_name: Option, + check_for_runtime_version_section: bool, ) { let cargo_cmd = match crate::prerequisites::check() { Ok(cmd) => cmd, @@ -247,6 +266,7 @@ fn build_project( cargo_cmd, features_to_enable, wasm_binary_name, + check_for_runtime_version_section, ); let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary { diff --git a/utils/wasm-builder/src/lib.rs b/utils/wasm-builder/src/lib.rs index 8405b5a0bda9e..c9011f97be711 100644 --- a/utils/wasm-builder/src/lib.rs +++ b/utils/wasm-builder/src/lib.rs @@ -222,7 +222,7 @@ fn get_rustup_command() -> Option { continue } - let Some(cargo_version) = cmd.version() else { continue; }; + let Some(cargo_version) = cmd.version() else { continue }; versions.push((cargo_version, rustup_version.to_string())); } diff --git a/utils/wasm-builder/src/version.rs b/utils/wasm-builder/src/version.rs index 77e62b394bd55..e4f7d98be6187 100644 --- a/utils/wasm-builder/src/version.rs +++ b/utils/wasm-builder/src/version.rs @@ -24,9 +24,9 @@ pub struct Version { pub minor: u32, pub patch: u32, pub is_nightly: bool, - pub year: u32, - pub month: u32, - pub day: u32, + pub year: Option, + pub month: Option, + pub day: Option, } impl Version { @@ -62,25 +62,24 @@ impl Version { return None } - let date = version.split(" ").nth(3)?; - - let date_parts = date - .split("-") - .filter_map(|v| v.trim().strip_suffix(")").unwrap_or(v).parse().ok()) - .collect::>(); - - if 
date_parts.len() != 3 { - return None - } + let date_parts = version + .split(" ") + .nth(3) + .map(|date| { + date.split("-") + .filter_map(|v| v.trim().strip_suffix(")").unwrap_or(v).parse().ok()) + .collect::>() + }) + .unwrap_or_default(); Some(Version { major: version_parts[0], minor: version_parts[1], patch: version_parts[2], is_nightly, - year: date_parts[0], - month: date_parts[1], - day: date_parts[2], + year: date_parts.get(0).copied(), + month: date_parts.get(1).copied(), + day: date_parts.get(2).copied(), }) } } @@ -104,9 +103,9 @@ impl Ord for Version { } let to_compare = [ - (self.major, other.major), - (self.minor, other.minor), - (self.patch, other.patch), + (Some(self.major), Some(other.major)), + (Some(self.minor), Some(other.minor)), + (Some(self.patch), Some(other.patch)), (self.year, other.year), (self.month, other.month), (self.day, other.day), @@ -188,11 +187,29 @@ mod tests { minor: 66, patch: 0, is_nightly: false, - year: 2022, - month: 11, - day: 15 + year: Some(2022), + month: Some(11), + day: Some(15), + }, + version_1_66_0, + ); + } + + #[test] + fn version_without_hash_and_date() { + // Apparently there are installations that print without the hash and date. 
+ let version_1_69_0 = Version::extract("cargo 1.69.0-nightly").unwrap(); + assert_eq!( + Version { + major: 1, + minor: 69, + patch: 0, + is_nightly: true, + year: None, + month: None, + day: None, }, - version_1_66_0 + version_1_69_0, ); } } diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index c45a40a6b9202..849af853c6da4 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -19,6 +19,7 @@ use crate::{write_file_if_changed, CargoCommandVersioned, OFFLINE}; use build_helper::rerun_if_changed; use cargo_metadata::{CargoOpt, Metadata, MetadataCommand}; +use parity_wasm::elements::{deserialize_buffer, Module}; use std::{ borrow::ToOwned, collections::HashSet, @@ -116,6 +117,7 @@ pub(crate) fn create_and_compile( cargo_cmd: CargoCommandVersioned, features_to_enable: Vec, wasm_binary_name: Option, + check_for_runtime_version_section: bool, ) -> (Option, WasmBinaryBloaty) { let wasm_workspace_root = get_wasm_workspace_root(); let wasm_workspace = wasm_workspace_root.join("wbuild"); @@ -134,6 +136,10 @@ pub(crate) fn create_and_compile( let (wasm_binary, wasm_binary_compressed, bloaty) = compact_wasm_file(&project, profile, project_cargo_toml, wasm_binary_name); + if check_for_runtime_version_section { + ensure_runtime_version_wasm_section_exists(bloaty.wasm_binary_bloaty_path()); + } + wasm_binary .as_ref() .map(|wasm_binary| copy_wasm_to_target_directory(project_cargo_toml, wasm_binary)); @@ -159,6 +165,29 @@ pub(crate) fn create_and_compile( (final_wasm_binary, bloaty) } +/// Ensures that the `runtime_version` wasm section exists in the given wasm file. +/// +/// If the section can not be found, it will print an error and exit the builder. 
+fn ensure_runtime_version_wasm_section_exists(wasm: &Path) { + let wasm_blob = fs::read(wasm).expect("`{wasm}` was just written and should exist; qed"); + + let module: Module = match deserialize_buffer(&wasm_blob) { + Ok(m) => m, + Err(e) => { + println!("Failed to deserialize `{}`: {e:?}", wasm.display()); + process::exit(1); + }, + }; + + if !module.custom_sections().any(|cs| cs.name() == "runtime_version") { + println!( + "Couldn't find the `runtime_version` wasm section. \ + Please ensure that you are using the `sp_version::runtime_version` attribute macro!" + ); + process::exit(1); + } +} + /// Adjust the mtime of the bloaty and compressed/compact wasm files. /// /// We add the bloaty and the compressed/compact wasm file to the `rerun-if-changed` files.